✨ ROSA: Reconcile ROSAMachinePool fields #4804
Changes from 4 commits
@@ -115,6 +115,7 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools,verbs=get;list;watch
 // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,verbs=get;list;watch;update;patch;delete
 // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/finalizers,verbs=update

 // Reconcile will reconcile RosaControlPlane Resources.
 func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
@@ -267,7 +268,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 		billingAccount = rosaScope.ControlPlane.Spec.BillingAccount
 	}

-	spec := ocm.Spec{
+	ocmClusterSpec := ocm.Spec{
 		DryRun: ptr.To(false),
 		Name:   rosaScope.RosaClusterName(),
 		Region: *rosaScope.ControlPlane.Spec.Region,
@@ -281,7 +282,6 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc

 		SubnetIds:         rosaScope.ControlPlane.Spec.Subnets,
 		AvailabilityZones: rosaScope.ControlPlane.Spec.AvailabilityZones,
-		NetworkType:       rosaScope.ControlPlane.Spec.Network.NetworkType,
 		IsSTS:             true,
 		RoleARN:           *rosaScope.ControlPlane.Spec.InstallerRoleARN,
 		SupportRoleARN:    *rosaScope.ControlPlane.Spec.SupportRoleARN,
@@ -296,44 +296,48 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 		AWSCreator: creator,
 	}

-	_, machineCIDR, err := net.ParseCIDR(rosaScope.ControlPlane.Spec.Network.MachineCIDR)
-	if err == nil {
-		spec.MachineCIDR = *machineCIDR
-	} else {
-		// TODO: expose in status
-		rosaScope.Error(err, "rosacontrolplane.spec.network.machineCIDR invalid", rosaScope.ControlPlane.Spec.Network.MachineCIDR)
-		return ctrl.Result{}, nil
-	}
+	if networkSpec := rosaScope.ControlPlane.Spec.Network; networkSpec != nil {
+		if networkSpec.MachineCIDR != "" {
+			_, machineCIDR, err := net.ParseCIDR(networkSpec.MachineCIDR)
+			if err != nil {
+				// TODO: expose in status
+				rosaScope.Error(err, "rosacontrolplane.spec.network.machineCIDR invalid", networkSpec.MachineCIDR)
+				return ctrl.Result{}, nil
+			}
+			ocmClusterSpec.MachineCIDR = *machineCIDR
+		}

-	if rosaScope.ControlPlane.Spec.Network.PodCIDR != "" {
-		_, podCIDR, err := net.ParseCIDR(rosaScope.ControlPlane.Spec.Network.PodCIDR)
-		if err == nil {
-			spec.PodCIDR = *podCIDR
-		} else {
-			// TODO: expose in status.
-			rosaScope.Error(err, "rosacontrolplane.spec.network.podCIDR invalid", rosaScope.ControlPlane.Spec.Network.PodCIDR)
-			return ctrl.Result{}, nil
-		}
-	}
+		if networkSpec.PodCIDR != "" {
+			_, podCIDR, err := net.ParseCIDR(networkSpec.PodCIDR)
+			if err != nil {
+				// TODO: expose in status.
+				rosaScope.Error(err, "rosacontrolplane.spec.network.podCIDR invalid", networkSpec.PodCIDR)
+				return ctrl.Result{}, nil
+			}
+			ocmClusterSpec.PodCIDR = *podCIDR
+		}

-	if rosaScope.ControlPlane.Spec.Network.ServiceCIDR != "" {
-		_, serviceCIDR, err := net.ParseCIDR(rosaScope.ControlPlane.Spec.Network.ServiceCIDR)
-		if err == nil {
-			spec.ServiceCIDR = *serviceCIDR
-		} else {
-			// TODO: expose in status.
-			rosaScope.Error(err, "rosacontrolplane.spec.network.serviceCIDR invalid", rosaScope.ControlPlane.Spec.Network.ServiceCIDR)
-			return ctrl.Result{}, nil
-		}
-	}
+		if networkSpec.ServiceCIDR != "" {
+			_, serviceCIDR, err := net.ParseCIDR(networkSpec.ServiceCIDR)
+			if err != nil {
+				// TODO: expose in status.
+				rosaScope.Error(err, "rosacontrolplane.spec.network.serviceCIDR invalid", networkSpec.ServiceCIDR)
+				return ctrl.Result{}, nil
+			}
+			ocmClusterSpec.ServiceCIDR = *serviceCIDR
+		}
+
+		ocmClusterSpec.HostPrefix = networkSpec.HostPrefix
+		ocmClusterSpec.NetworkType = networkSpec.NetworkType
+	}

 	// Set autoscale replica
 	if rosaScope.ControlPlane.Spec.Autoscaling != nil {
-		spec.MaxReplicas = rosaScope.ControlPlane.Spec.Autoscaling.MaxReplicas
-		spec.MinReplicas = rosaScope.ControlPlane.Spec.Autoscaling.MinReplicas
+		ocmClusterSpec.MaxReplicas = rosaScope.ControlPlane.Spec.Autoscaling.MaxReplicas
+		ocmClusterSpec.MinReplicas = rosaScope.ControlPlane.Spec.Autoscaling.MinReplicas
 	}

-	cluster, err = ocmClient.CreateCluster(spec)
+	cluster, err = ocmClient.CreateCluster(ocmClusterSpec)
 	if err != nil {
 		// TODO: need to expose in status, as likely the spec is invalid
 		return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err)

Review discussion on the machineCIDR error handling:

Comment: Could we move this to a validation webhook, and just return the error here (backoff loop) or set a condition?

Reply: Returning an error when the data provided by the user is incorrect would only lead to infinite retry by the controller and not result in a good outcome. It seems universally correct to expose states like this in status so that the user knows that they need to update the object's spec.
@@ -17,6 +17,7 @@ limitations under the License.
 package v1beta2

 import (
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -45,37 +46,64 @@ type RosaMachinePoolSpec struct {
 	// +optional
 	AvailabilityZone string `json:"availabilityZone,omitempty"`

+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="subnet is immutable"
+	//
+	// +immutable
 	// +optional
 	Subnet string `json:"subnet,omitempty"`

 	// Labels specifies labels for the Kubernetes node objects
 	// +optional
 	Labels map[string]string `json:"labels,omitempty"`

+	// Taints specifies the taints to apply to the nodes of the machine pool
+	// +optional
+	Taints []RosaTaint `json:"taints,omitempty"`
+
 	// AutoRepair specifies whether health checks should be enabled for machines
 	// in the NodePool. The default is false.
 	// +optional
 	// +kubebuilder:default=false
 	AutoRepair bool `json:"autoRepair,omitempty"`

+	// +kubebuilder:validation:Required
+	//
 	// InstanceType specifies the AWS instance type
-	InstanceType string `json:"instanceType,omitempty"`
+	InstanceType string `json:"instanceType"`

 	// Autoscaling specifies auto scaling behaviour for this MachinePool.
 	// required if Replicas is not configured
 	// +optional
 	Autoscaling *RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"`

-	// TODO(alberto): Enable and propagate this API input.
-	// Taints []*Taint `json:"taints,omitempty"`
-	// TuningConfigs []string `json:"tuningConfigs,omitempty"`
-	// Version *Version `json:"version,omitempty"`
+	// TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool.
+	// Tuning configs must already exist.
+	// +optional
+	TuningConfigs []string `json:"tuningConfigs,omitempty"`

 	// ProviderIDList contain a ProviderID for each machine instance that's currently managed by this machine pool.
 	// +optional
 	ProviderIDList []string `json:"providerIDList,omitempty"`
 }

+type RosaTaint struct {
+	// +kubebuilder:validation:Required
+	//
+	// The taint key to be applied to a node.
+	Key string `json:"key"`
+
+	// +kubebuilder:validation:Pattern:=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$`
+	//
+	// The taint value corresponding to the taint key.
+	// +optional
+	Value string `json:"value,omitempty"`
+
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum=NoSchedule;PreferNoSchedule;NoExecute
+	//
+	// The effect of the taint on pods that do not tolerate the taint.
+	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+	Effect corev1.TaintEffect `json:"effect"`
+}
+
 // RosaMachinePoolAutoScaling specifies scaling options.
 type RosaMachinePoolAutoScaling struct {
 	// +kubebuilder:validation:Minimum=1
Review discussion on the added rosacontrolplanes/finalizers RBAC rule:

Comment: Why is this needed?

Reply: This is needed when the OwnerReferencesPermissionEnforcement plugin is enabled:
https://sdk.operatorframework.io/docs/faqs/#after-deploying-my-operator-why-do-i-see-errors-like-is-forbidden-cannot-set-blockownerdeletion-if-an-ownerreference-refers-to-a-resource-you-cant-set-finalizers-on-
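For context, a minimal sketch of how that permission comes into play, assuming the controller sets controller owner references on dependent objects; the helper name and the Secret example are illustrative, not taken from this PR:

```go
package controllers

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setOwner is an illustrative helper. SetControllerReference adds an owner
// reference with blockOwnerDeletion=true; when the API server runs the
// OwnerReferencesPermissionEnforcement admission plugin, that write is only
// allowed if the client may update the owner's "finalizers" subresource,
// which is what the new rosacontrolplanes/finalizers RBAC rule grants.
func setOwner(owner client.Object, dependent *corev1.Secret, scheme *runtime.Scheme) error {
	return controllerutil.SetControllerReference(owner, dependent, scheme)
}
```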