Commit 93276cb

Support node/pod affinity, tolerations and topologySpreadConstraints that CNPG supports (#717)

nhudson authored Apr 19, 2024
1 parent ed6b2ce commit 93276cb
Showing 18 changed files with 2,734 additions and 16 deletions.
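With this change, placement can be set directly on a CoreDB resource. Below is a minimal sketch of a manifest using the new `affinityConfiguration` field; the instance name, node label, and taint key are illustrative, while the keys under `affinityConfiguration` follow the CNPG AffinityConfiguration schema referenced in the doc comments below.

```yaml
apiVersion: coredb.io/v1alpha1
kind: CoreDB
metadata:
  name: my-db                                  # illustrative name
spec:
  affinityConfiguration:
    podAntiAffinityType: preferred
    topologyKey: topology.kubernetes.io/zone
    nodeSelector:
      node-role.kubernetes.io/tembo: "true"    # example node label
    tolerations:
      - key: tembo.io/dedicated                # example taint key
        operator: Equal
        value: postgres
        effect: NoSchedule
```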
2 changes: 1 addition & 1 deletion charts/tembo-operator/Chart.yaml
@@ -3,7 +3,7 @@ name: tembo-operator
description: 'Helm chart to deploy the tembo-operator'
type: application
icon: https://cloud.tembo.io/images/TemboElephant.png
version: 0.5.0
version: 0.5.1
home: https://tembo.io
sources:
- https://github.com/tembo-io/tembo
650 changes: 650 additions & 0 deletions charts/tembo-operator/templates/crd.yaml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion tembo-operator/Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion tembo-operator/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "controller"
description = "Tembo Operator for Postgres"
version = "0.44.1"
version = "0.45.0"
edition = "2021"
default-run = "controller"
license = "Apache-2.0"
32 changes: 32 additions & 0 deletions tembo-operator/src/apis/coredb_types.rs
@@ -13,6 +13,7 @@ use k8s_openapi::{
apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::ObjectMeta},
};

use crate::cloudnativepg::clusters::{ClusterAffinity, ClusterTopologySpreadConstraints};
use crate::cloudnativepg::poolers::{
PoolerPgbouncerPoolMode, PoolerTemplateSpecContainersResources,
};
@@ -554,6 +555,37 @@ pub struct CoreDBSpec {
/// **Default**: `None` (uses the `default` StorageClass in your cluster)
#[serde(rename = "storageClass")]
pub storage_class: Option<String>,

/// An AffinityConfiguration provides a way to configure the CoreDB instance to run
/// on specific nodes in the cluster based on nodeSelector, nodeAffinity, and tolerations.
///
/// For more information on AffinityConfiguration, please see the [CloudNativePG documentation](https://cloudnative-pg.io/documentation/1.22/cloudnative-pg.v1/#postgresql-cnpg-io-v1-AffinityConfiguration)
///
/// **Default**:
/// ```yaml
/// apiVersion: coredb.io/v1alpha1
/// kind: CoreDB
/// metadata:
///   name: test-db-restore
/// spec:
///   affinityConfiguration:
///     podAntiAffinityType: preferred
///     topologyKey: topology.kubernetes.io/zone
/// ```
#[serde(
rename = "affinityConfiguration",
default = "defaults::default_affinity_configuration"
)]
pub affinity_configuration: Option<ClusterAffinity>,

/// The topologySpreadConstraints field provides a way to spread matching pods among the given topology
///
/// For more information see the Kubernetes documentation on [Topology Spread Constraints](https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/)
/// Tembo is compatible with the `v1` version of the TopologySpreadConstraints up to [Kubernetes 1.25](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#topologyspreadconstraint-v1-core)
///
/// **Default**: `None`
#[serde(rename = "topologySpreadConstraints")]
pub topology_spread_constraints: Option<Vec<ClusterTopologySpreadConstraints>>,
}

impl CoreDBSpec {
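Entries in the new `topologySpreadConstraints` field take the standard `v1` TopologySpreadConstraint shape. A hedged example, reusing the instance name from the doc comment above and assuming the CNPG `cnpg.io/cluster` pod label for the selector:

```yaml
apiVersion: coredb.io/v1alpha1
kind: CoreDB
metadata:
  name: test-db-restore
spec:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          cnpg.io/cluster: test-db-restore     # assumed pod label; match your pods' labels
```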
22 changes: 20 additions & 2 deletions tembo-operator/src/app_service/manager.rs
@@ -1,5 +1,6 @@
use crate::{
apis::coredb_types::CoreDB, ingress_route_crd::IngressRouteRoutes, Context, Error, Result,
apis::coredb_types::CoreDB, cloudnativepg::placement::cnpg_placement::PlacementConfig,
ingress_route_crd::IngressRouteRoutes, Context, Error, Result,
};
use k8s_openapi::{
api::{
@@ -57,6 +58,7 @@ fn generate_resource(
oref: OwnerReference,
domain: Option<String>,
annotations: &BTreeMap<String, String>,
placement: Option<PlacementConfig>,
) -> AppServiceResources {
let resource_name = format!("{}-{}", coredb_name, appsvc.name.clone());
let service = appsvc.routing.as_ref().map(|_| {
@@ -76,6 +78,7 @@
namespace,
oref,
annotations,
placement,
);

// If DATA_PLANE_BASEDOMAIN is not set, don't generate IngressRoutes, IngressRouteTCPs, or EntryPoints
@@ -226,6 +229,7 @@ fn generate_deployment(
namespace: &str,
oref: OwnerReference,
annotations: &BTreeMap<String, String>,
placement: Option<PlacementConfig>,
) -> Deployment {
let mut labels: BTreeMap<String, String> = BTreeMap::new();
labels.insert("app".to_owned(), resource_name.to_string());
@@ -451,7 +455,14 @@
}
}

let affinity = placement.as_ref().and_then(|p| p.combine_affinity_items());
let tolerations = placement.as_ref().map(|p| p.tolerations.clone());
let topology_spread_constraints = placement
.as_ref()
.and_then(|p| p.topology_spread_constraints.clone());

let pod_spec = PodSpec {
affinity,
containers: vec![Container {
args: appsvc.args.clone(),
command: appsvc.command.clone(),
@@ -466,6 +477,8 @@
volume_mounts: Some(volume_mounts),
..Container::default()
}],
tolerations,
topology_spread_constraints,
volumes: Some(volumes),
security_context: pod_security_context,
..PodSpec::default()
@@ -625,7 +638,11 @@ fn generate_appsvc_annotations(cdb: &CoreDB) -> BTreeMap<String, String> {
)
}

pub async fn reconcile_app_services(cdb: &CoreDB, ctx: Arc<Context>) -> Result<(), Action> {
pub async fn reconcile_app_services(
cdb: &CoreDB,
ctx: Arc<Context>,
placement: Option<PlacementConfig>,
) -> Result<(), Action> {
let client = ctx.client.clone();
let ns = cdb.namespace().unwrap();
let coredb_name = cdb.name_any();
@@ -761,6 +778,7 @@ pub async fn reconcile_app_services(cdb: &CoreDB, ctx: Arc<Context>) -> Result<(
oref.clone(),
domain.to_owned(),
&annotations,
placement.clone(),
)
})
.collect();
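The `PlacementConfig` threaded into `generate_deployment` comes from the new `cloudnativepg::placement` module, which is not rendered in this view. A minimal sketch of the shape implied by the calls above (`combine_affinity_items`, a plain `tolerations` vector, optional `topology_spread_constraints`); the field names are assumptions for illustration only.

```rust
use k8s_openapi::api::core::v1::{
    Affinity, NodeAffinity, PodAntiAffinity, Toleration, TopologySpreadConstraint,
};

// Hypothetical layout of the placement config; the real struct lives in
// `cloudnativepg::placement::cnpg_placement` and may differ.
#[derive(Clone, Default)]
pub struct PlacementConfig {
    pub node_affinity: Option<NodeAffinity>,
    pub pod_anti_affinity: Option<PodAntiAffinity>,
    pub tolerations: Vec<Toleration>,
    pub topology_spread_constraints: Option<Vec<TopologySpreadConstraint>>,
}

impl PlacementConfig {
    /// Fold the individual affinity pieces into a single `Affinity`,
    /// returning `None` when nothing was configured so the PodSpec
    /// field can stay unset.
    pub fn combine_affinity_items(&self) -> Option<Affinity> {
        if self.node_affinity.is_none() && self.pod_anti_affinity.is_none() {
            return None;
        }
        Some(Affinity {
            node_affinity: self.node_affinity.clone(),
            pod_anti_affinity: self.pod_anti_affinity.clone(),
            ..Affinity::default()
        })
    }
}
```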
1 change: 1 addition & 0 deletions tembo-operator/src/cloudnativepg/clusters.rs
@@ -1,4 +1,5 @@
// WARNING: generated by kopium - manual changes will be overwritten

// kopium command: kopium -D Default clusters.postgresql.cnpg.io -A
// kopium version: 0.16.5

31 changes: 22 additions & 9 deletions tembo-operator/src/cloudnativepg/cnpg.rs
@@ -10,7 +10,7 @@ use crate::{
cloudnativepg::{
backups::Backup,
clusters::{
Cluster, ClusterAffinity, ClusterBackup, ClusterBackupBarmanObjectStore,
Cluster, ClusterBackup, ClusterBackupBarmanObjectStore,
ClusterBackupBarmanObjectStoreData, ClusterBackupBarmanObjectStoreDataCompression,
ClusterBackupBarmanObjectStoreDataEncryption,
ClusterBackupBarmanObjectStoreS3Credentials,
@@ -43,6 +43,7 @@ use crate::{
ClusterStorage, ClusterSuperuserSecret,
},
cnpg_utils::{is_image_updated, patch_cluster, restart_and_wait_for_restart},
placement::cnpg_placement::PlacementConfig,
poolers::{
Pooler, PoolerCluster, PoolerPgbouncer, PoolerSpec, PoolerTemplate, PoolerTemplateSpec,
PoolerTemplateSpecContainers, PoolerType,
@@ -647,6 +648,8 @@ pub fn cnpg_cluster_from_cdb(
let (backup, service_account_template) = cnpg_backup_configuration(cdb, &cfg);
let storage = cnpg_cluster_storage(cdb);
let replication = cnpg_high_availability(cdb);
let affinity = cdb.spec.affinity_configuration.clone();
let topology_spread_constraints = cdb.spec.topology_spread_constraints.clone();

let PostgresConfig {
postgres_parameters,
@@ -709,11 +712,8 @@
..ObjectMeta::default()
},
spec: ClusterSpec {
affinity: Some(ClusterAffinity {
pod_anti_affinity_type: Some("preferred".to_string()),
topology_key: Some("topology.kubernetes.io/zone".to_string()),
..ClusterAffinity::default()
}),
affinity,
topology_spread_constraints,
backup,
service_account_template,
bootstrap,
@@ -1260,13 +1260,23 @@ pub async fn reconcile_metrics_service(cdb: &CoreDB, ctx: Arc<Context>) -> Resul
}
// Reconcile a Pooler
#[instrument(skip(cdb, ctx) fields(trace_id, instance_name = %cdb.name_any()))]
pub async fn reconcile_pooler(cdb: &CoreDB, ctx: Arc<Context>) -> Result<(), Action> {
pub async fn reconcile_pooler(
cdb: &CoreDB,
ctx: Arc<Context>,
placement: Option<PlacementConfig>,
) -> Result<(), Action> {
let client = ctx.client.clone();
let name = cdb.name_any() + "-pooler";
let namespace = cdb.namespace().unwrap();
let pooler_api: Api<Pooler> = Api::namespaced(client.clone(), namespace.as_str());

let owner_reference = cdb.controller_owner_ref(&()).unwrap();
let pooler_api: Api<Pooler> = Api::namespaced(client.clone(), namespace.as_str());
let pooler_tolerations = placement
.as_ref()
.and_then(|config| config.convert_pooler_tolerations());
let topology_spread_constraints = placement
.as_ref()
.and_then(|p| p.convert_pooler_topology_spread_constraints());
let affinity = placement.as_ref().and_then(|p| p.convert_pooler_affinity());

// If pooler is enabled, create or update
if cdb.spec.connectionPooler.enabled {
@@ -1301,6 +1311,9 @@ pub async fn reconcile_pooler(cdb: &CoreDB, ctx: Arc<Context>) -> Result<(), Act
resources: cdb.spec.connectionPooler.pooler.resources.clone(),
..Default::default()
}],
affinity,
tolerations: pooler_tolerations,
topology_spread_constraints,
..Default::default()
}),
}),
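The `convert_pooler_*` helpers are needed because the Pooler CRD bindings are generated by kopium rather than reused from `k8s_openapi`, so the shared placement settings must be mapped into the pooler template's own types. The generated type below is a stand-in with an assumed name and shape (the real definitions live in `poolers.rs`, not rendered here); the sketch only shows the pattern of the conversion.

```rust
use k8s_openapi::api::core::v1::Toleration;

// Stand-in for the kopium-generated pooler toleration type; the actual
// struct in `cloudnativepg::poolers` may differ in name and fields.
#[derive(Clone, Default)]
pub struct PoolerTemplateSpecTolerations {
    pub effect: Option<String>,
    pub key: Option<String>,
    pub operator: Option<String>,
    pub toleration_seconds: Option<i64>,
    pub value: Option<String>,
}

/// Copy core v1 tolerations field-by-field into the pooler template type.
fn convert_tolerations(tolerations: &[Toleration]) -> Vec<PoolerTemplateSpecTolerations> {
    tolerations
        .iter()
        .map(|t| PoolerTemplateSpecTolerations {
            effect: t.effect.clone(),
            key: t.key.clone(),
            operator: t.operator.clone(),
            toleration_seconds: t.toleration_seconds,
            value: t.value.clone(),
        })
        .collect()
}
```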
1 change: 1 addition & 0 deletions tembo-operator/src/cloudnativepg/mod.rs
@@ -3,6 +3,7 @@ pub mod clusters;
pub(crate) mod cnpg;
// pub(crate) mod cnpg_backups;
mod cnpg_utils;
pub(crate) mod placement;
pub mod poolers;
mod scheduledbackups;
