diff --git a/.secrets.baseline b/.secrets.baseline index 8c2f2d68e..76297bc58 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -75,10 +75,6 @@ { "path": "detect_secrets.filters.allowlist.is_line_allowlisted" }, - { - "path": "detect_secrets.filters.common.is_baseline_file", - "filename": ".secrets.baseline" - }, { "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", "min_level": 2 @@ -169,14 +165,14 @@ "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a242f4a16b957f7ff99eb24e189e94d270d2348b", "is_verified": false, - "line_number": 293 + "line_number": 291 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 302 + "line_number": 300 } ], "apis/clusters/v1beta1/cassandra_types.go": [ @@ -185,21 +181,21 @@ "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "331cc743251c3b9504229de4d139c539da121a33", "is_verified": false, - "line_number": 253 + "line_number": 241 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "0ad8d7005e084d4f028a4277b73c6fab24269c17", "is_verified": false, - "line_number": 340 + "line_number": 328 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cassandra_types.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 427 + "line_number": 415 } ], "apis/clusters/v1beta1/cassandra_webhook.go": [ @@ -208,7 +204,7 @@ "filename": "apis/clusters/v1beta1/cassandra_webhook.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 235 + "line_number": 236 } ], "apis/clusters/v1beta1/kafka_types.go": [ @@ -217,14 +213,14 @@ "filename": "apis/clusters/v1beta1/kafka_types.go", "hashed_secret": "964c67cddfe8e6707157152dcf319126502199dc", "is_verified": false, - "line_number": 274 + "line_number": 262 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/kafka_types.go", "hashed_secret": "589a0ad3cc6bc886a00c46a22e5065c48bd8e1b2", "is_verified": false, - "line_number": 434 + "line_number": 422 } ], "apis/clusters/v1beta1/kafkaconnect_types.go": [ @@ -305,21 +301,21 @@ "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 371 + "line_number": 369 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a3d7d4a96d18c8fc5a1cf9c9c01c45b4690b4008", "is_verified": false, - "line_number": 377 + "line_number": 375 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 440 + "line_number": 438 } ], "apis/clusters/v1beta1/redis_types.go": [ @@ -351,7 +347,7 @@ "filename": "apis/clusters/v1beta1/redis_webhook.go", "hashed_secret": "bc1c5ae5fd4a238d86261f422e62c489de408c22", "is_verified": false, - "line_number": 322 + "line_number": 323 } ], "apis/clusters/v1beta1/zz_generated.deepcopy.go": [ @@ -360,7 +356,7 @@ "filename": "apis/clusters/v1beta1/zz_generated.deepcopy.go", "hashed_secret": "44e17306b837162269a410204daaa5ecee4ec22c", "is_verified": false, - "line_number": 1316 + "line_number": 665 } ], "apis/kafkamanagement/v1beta1/kafkauser_types.go": [ @@ -529,6 +525,22 @@ "line_number": 51 } ], + "controllers/clusters/helpers.go": 
[ + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", + "is_verified": false, + "line_number": 119 + }, + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 124 + } + ], "controllers/clusters/kafkaconnect_controller_test.go": [ { "type": "Secret Keyword", @@ -709,7 +721,7 @@ "filename": "pkg/instaclustr/client.go", "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 2078 + "line_number": 2084 } ], "pkg/instaclustr/mock/client.go": [ @@ -1116,5 +1128,5 @@ } ] }, - "generated_at": "2024-02-28T09:07:57Z" + "generated_at": "2024-02-28T14:20:52Z" } diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go index 448d6c853..53e5cb96d 100644 --- a/apis/clusters/v1beta1/cadence_types.go +++ b/apis/clusters/v1beta1/cadence_types.go @@ -33,7 +33,7 @@ import ( type CadenceDataCentre struct { GenericDataCentreSpec `json:",inline"` - NumberOfNodes int `json:"numberOfNodes"` + NodesNumber int `json:"nodesNumber"` NodeSize string `json:"nodeSize"` ClientEncryption bool `json:"clientEncryption,omitempty"` @@ -138,8 +138,6 @@ type CadenceStatus struct { type CadenceDataCentreStatus struct { GenericDataCentreStatus `json:",inline"` - NumberOfNodes int `json:"numberOfNodes,omitempty"` - Nodes []*Node `json:"nodes,omitempty"` PrivateLink PrivateLinkStatuses `json:"privateLink,omitempty"` } @@ -322,7 +320,7 @@ func (cdc *CadenceDataCentre) ToInstAPI() *models.CadenceDataCentre { GenericDataCentreFields: cdc.GenericDataCentreSpec.ToInstAPI(), PrivateLink: cdc.PrivateLink.ToInstAPI(), ClientToClusterEncryption: cdc.ClientEncryption, - NumberOfNodes: cdc.NumberOfNodes, + NumberOfNodes: cdc.NodesNumber, NodeSize: cdc.NodeSize, } } @@ -526,14 +524,13 @@ func (c *Cadence) GetHeadlessPorts() []k8scorev1.ServicePort { func (cdc *CadenceDataCentreStatus) FromInstAPI(instaModel *models.CadenceDataCentre) { cdc.GenericDataCentreStatus.FromInstAPI(&instaModel.GenericDataCentreFields) - cdc.NumberOfNodes = instaModel.NumberOfNodes cdc.Nodes = nodesFromInstAPI(instaModel.Nodes) cdc.PrivateLink.FromInstAPI(instaModel.PrivateLink) } func (cdc *CadenceDataCentre) Equals(o *CadenceDataCentre) bool { return cdc.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && - cdc.NumberOfNodes == o.NumberOfNodes && + cdc.NodesNumber == o.NodesNumber && cdc.NodeSize == o.NodeSize && cdc.ClientEncryption == o.ClientEncryption && slices.EqualsPtr(cdc.PrivateLink, o.PrivateLink) @@ -543,7 +540,7 @@ func (cdc *CadenceDataCentre) FromInstAPI(instaModel *models.CadenceDataCentre) cdc.GenericDataCentreSpec.FromInstAPI(&instaModel.GenericDataCentreFields) cdc.PrivateLink.FromInstAPI(instaModel.PrivateLink) - cdc.NumberOfNodes = instaModel.NumberOfNodes + cdc.NodesNumber = instaModel.NumberOfNodes cdc.NodeSize = instaModel.NodeSize cdc.ClientEncryption = instaModel.ClientToClusterEncryption } @@ -581,6 +578,5 @@ func (cs *CadenceStatus) DCsEqual(o []*CadenceDataCentreStatus) bool { func (cdc *CadenceDataCentreStatus) Equals(o *CadenceDataCentreStatus) bool { return cdc.GenericDataCentreStatus.Equals(&o.GenericDataCentreStatus) && cdc.PrivateLink.Equal(o.PrivateLink) && - nodesEqual(cdc.Nodes, o.Nodes) && - cdc.NumberOfNodes == o.NumberOfNodes + nodesEqual(cdc.Nodes, o.Nodes) } diff --git a/apis/clusters/v1beta1/cadence_webhook.go 
b/apis/clusters/v1beta1/cadence_webhook.go index cda1a6dce..edbf0593b 100644 --- a/apis/clusters/v1beta1/cadence_webhook.go +++ b/apis/clusters/v1beta1/cadence_webhook.go @@ -416,8 +416,8 @@ func (cs *CadenceSpec) validateImmutableDataCentresFieldsUpdate(oldSpec CadenceS return fmt.Errorf("cannot update immutable data centre fields: new spec: %v: old spec: %v", newDCImmutableFields, oldDCImmutableFields) } - if newDC.NumberOfNodes < oldDC.NumberOfNodes { - return fmt.Errorf("deleting nodes is not supported. Number of nodes must be greater than: %v", oldDC.NumberOfNodes) + if newDC.NodesNumber < oldDC.NodesNumber { + return fmt.Errorf("deleting nodes is not supported. Number of nodes must be greater than: %v", oldDC.NodesNumber) } err := newDC.validateImmutableCloudProviderSettingsUpdate(&oldDC.GenericDataCentreSpec) diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go index c4ffcea1b..e4c5cff7c 100644 --- a/apis/clusters/v1beta1/cassandra_types.go +++ b/apis/clusters/v1beta1/cassandra_types.go @@ -54,7 +54,7 @@ type CassandraRestoreFrom struct { type CassandraSpec struct { GenericClusterSpec `json:",inline"` - RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"` + RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty" dcomparisonSkip:"true"` DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"` LuceneEnabled bool `json:"luceneEnabled,omitempty"` PasswordAndUserAuth bool `json:"passwordAndUserAuth,omitempty"` @@ -73,18 +73,6 @@ type CassandraStatus struct { AvailableUsers References `json:"availableUsers,omitempty"` } -func (s *CassandraStatus) ToOnPremises() ClusterStatus { - dc := &DataCentreStatus{ - ID: s.DataCentres[0].ID, - Nodes: s.DataCentres[0].Nodes, - } - - return ClusterStatus{ - ID: s.ID, - DataCentres: []*DataCentreStatus{dc}, - } -} - func (s *CassandraStatus) Equals(o *CassandraStatus) bool { return s.GenericStatus.Equals(&o.GenericStatus) && s.DataCentresEqual(o) diff --git a/apis/clusters/v1beta1/kafka_types.go b/apis/clusters/v1beta1/kafka_types.go index d368eb8b2..5007b03b8 100644 --- a/apis/clusters/v1beta1/kafka_types.go +++ b/apis/clusters/v1beta1/kafka_types.go @@ -169,18 +169,6 @@ func (s *KafkaStatus) DCsEqual(o *KafkaStatus) bool { return true } -func (s *KafkaStatus) ToOnPremises() ClusterStatus { - dc := &DataCentreStatus{ - ID: s.DataCentres[0].ID, - Nodes: s.DataCentres[0].Nodes, - } - - return ClusterStatus{ - ID: s.ID, - DataCentres: []*DataCentreStatus{dc}, - } -} - //+kubebuilder:object:root=true //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" diff --git a/apis/clusters/v1beta1/kafkaconnect_types.go b/apis/clusters/v1beta1/kafkaconnect_types.go index 990e4cbc9..661e0c22c 100644 --- a/apis/clusters/v1beta1/kafkaconnect_types.go +++ b/apis/clusters/v1beta1/kafkaconnect_types.go @@ -105,7 +105,7 @@ type KafkaConnectDataCentre struct { GenericDataCentreSpec `json:",inline"` NodeSize string `json:"nodeSize"` - NumberOfNodes int `json:"numberOfNodes"` + NodesNumber int `json:"nodesNumber"` ReplicationFactor int `json:"replicationFactor"` } @@ -239,13 +239,13 @@ func (k *KafkaConnectDataCentre) FromInstAPI(instaModel *models.KafkaConnectData k.GenericDataCentreSpec.FromInstAPI(&instaModel.GenericDataCentreFields) k.NodeSize = instaModel.NodeSize - k.NumberOfNodes = instaModel.NumberOfNodes + k.NodesNumber = instaModel.NumberOfNodes k.ReplicationFactor = instaModel.ReplicationFactor } func (k 
*KafkaConnectDataCentre) Equals(o *KafkaConnectDataCentre) bool { return k.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && - k.NumberOfNodes == o.NumberOfNodes && + k.NodesNumber == o.NodesNumber && k.ReplicationFactor == o.ReplicationFactor && k.NodeSize == o.NodeSize } @@ -461,7 +461,7 @@ func (kdc *KafkaConnectDataCentre) ToInstAPI() *models.KafkaConnectDataCentre { return &models.KafkaConnectDataCentre{ GenericDataCentreFields: kdc.GenericDataCentreSpec.ToInstAPI(), NodeSize: kdc.NodeSize, - NumberOfNodes: kdc.NumberOfNodes, + NumberOfNodes: kdc.NodesNumber, ReplicationFactor: kdc.ReplicationFactor, } } diff --git a/apis/clusters/v1beta1/kafkaconnect_webhook.go b/apis/clusters/v1beta1/kafkaconnect_webhook.go index 3791b1c22..3e1ba3cc0 100644 --- a/apis/clusters/v1beta1/kafkaconnect_webhook.go +++ b/apis/clusters/v1beta1/kafkaconnect_webhook.go @@ -158,7 +158,7 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return err } - if ((dc.NumberOfNodes*dc.ReplicationFactor)/dc.ReplicationFactor)%dc.ReplicationFactor != 0 { + if ((dc.NodesNumber*dc.ReplicationFactor)/dc.ReplicationFactor)%dc.ReplicationFactor != 0 { return fmt.Errorf("number of nodes must be a multiple of replication factor: %v", dc.ReplicationFactor) } } @@ -284,11 +284,11 @@ func (kc *KafkaConnectSpec) validateImmutableDataCentresFieldsUpdate(oldSpec Kaf return err } - if newDC.NumberOfNodes < oldDC.NumberOfNodes { - return fmt.Errorf("deleting nodes is not supported. Number of nodes must be greater than: %v", oldDC.NumberOfNodes) + if newDC.NodesNumber < oldDC.NodesNumber { + return fmt.Errorf("deleting nodes is not supported. Number of nodes must be greater than: %v", oldDC.NodesNumber) } - if ((newDC.NumberOfNodes*newDC.ReplicationFactor)/newDC.ReplicationFactor)%newDC.ReplicationFactor != 0 { + if ((newDC.NodesNumber*newDC.ReplicationFactor)/newDC.ReplicationFactor)%newDC.ReplicationFactor != 0 { return fmt.Errorf("number of nodes must be a multiple of replication factor: %v", newDC.ReplicationFactor) } } diff --git a/apis/clusters/v1beta1/postgresql_types.go b/apis/clusters/v1beta1/postgresql_types.go index a928026e0..c7de6362d 100644 --- a/apis/clusters/v1beta1/postgresql_types.go +++ b/apis/clusters/v1beta1/postgresql_types.go @@ -38,7 +38,7 @@ type PgDataCentre struct { // PostgreSQL options ClientEncryption bool `json:"clientEncryption"` NodeSize string `json:"nodeSize"` - NumberOfNodes int `json:"numberOfNodes"` + NodesNumber int `json:"nodesNumber"` //+kubebuilder:Validation:MaxItems:=1 InterDataCentreReplication []*InterDataCentreReplication `json:"interDataCentreReplication,omitempty"` @@ -102,9 +102,7 @@ type PgStatus struct { type PgDataCentreStatus struct { GenericDataCentreStatus `json:",inline"` - - NumberOfNodes int `json:"numberOfNodes"` - Nodes []*Node `json:"nodes"` + Nodes []*Node `json:"nodes"` } //+kubebuilder:object:root=true @@ -244,7 +242,7 @@ func (pdc *PgDataCentre) ToInstAPI() *models.PGDataCentre { InterDataCentreReplication: pdc.InterDCReplicationToInstAPI(), IntraDataCentreReplication: pdc.IntraDCReplicationToInstAPI(), NodeSize: pdc.NodeSize, - NumberOfNodes: pdc.NumberOfNodes, + NumberOfNodes: pdc.NodesNumber, } } @@ -505,7 +503,7 @@ func (p PgExtensions) ToInstAPI() []*models.PGExtension { func (pdc *PgDataCentre) Equals(o *PgDataCentre) bool { return pdc.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && pdc.ClientEncryption == o.ClientEncryption && - pdc.NumberOfNodes == o.NumberOfNodes && + pdc.NodesNumber == o.NodesNumber && 
pdc.NodeSize == o.NodeSize && slices.EqualsPtr(pdc.InterDataCentreReplication, o.InterDataCentreReplication) && slices.EqualsPtr(pdc.IntraDataCentreReplication, o.IntraDataCentreReplication) && @@ -517,7 +515,7 @@ func (pdc *PgDataCentre) FromInstAPI(instaModel *models.PGDataCentre) { pdc.ClientEncryption = instaModel.ClientToClusterEncryption pdc.NodeSize = instaModel.NodeSize - pdc.NumberOfNodes = instaModel.NumberOfNodes + pdc.NodesNumber = instaModel.NumberOfNodes pdc.InterReplicationsFromInstAPI(instaModel.InterDataCentreReplication) pdc.IntraReplicationsFromInstAPI(instaModel.IntraDataCentreReplication) @@ -562,12 +560,10 @@ func (pgs *PgSpec) ClusterConfigurationsFromInstAPI(instaModels []*models.Cluste func (s *PgDataCentreStatus) FromInstAPI(instaModel *models.PGDataCentre) { s.GenericDataCentreStatus.FromInstAPI(&instaModel.GenericDataCentreFields) s.Nodes = nodesFromInstAPI(instaModel.Nodes) - s.NumberOfNodes = instaModel.NumberOfNodes } func (s *PgDataCentreStatus) Equals(o *PgDataCentreStatus) bool { return s.GenericDataCentreStatus.Equals(&o.GenericDataCentreStatus) && - s.NumberOfNodes == o.NumberOfNodes && nodesEqual(s.Nodes, o.Nodes) } diff --git a/apis/clusters/v1beta1/postgresql_webhook.go b/apis/clusters/v1beta1/postgresql_webhook.go index 7c3d7fed2..1cf7c6522 100644 --- a/apis/clusters/v1beta1/postgresql_webhook.go +++ b/apis/clusters/v1beta1/postgresql_webhook.go @@ -372,7 +372,7 @@ func (pgs *PgSpec) validateImmutableDCsFieldsUpdate(oldSpec PgSpec) error { return err } - if newDC.NumberOfNodes != oldDC.NumberOfNodes { + if newDC.NodesNumber != oldDC.NodesNumber { return models.ErrImmutableNodesNumber } @@ -429,7 +429,7 @@ func (pdc *PgDataCentre) newImmutableFields() *immutablePostgreSQLDCFields { }, specificPostgreSQLDC: specificPostgreSQLDC{ ClientEncryption: pdc.ClientEncryption, - NumberOfNodes: pdc.NumberOfNodes, + NumberOfNodes: pdc.NodesNumber, }, } } diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go index ef31ce1c8..e0c90ea85 100644 --- a/apis/clusters/v1beta1/redis_types.go +++ b/apis/clusters/v1beta1/redis_types.go @@ -364,18 +364,6 @@ func (rs *RedisStatus) DCsEqual(o []*RedisDataCentreStatus) bool { return true } -func (s *RedisStatus) ToOnPremises() ClusterStatus { - dc := &DataCentreStatus{ - ID: s.DataCentres[0].ID, - Nodes: s.DataCentres[0].Nodes, - } - - return ClusterStatus{ - ID: s.ID, - DataCentres: []*DataCentreStatus{dc}, - } -} - func (r *Redis) GetUserRefs() References { return r.Spec.UserRefs } diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go index 164758108..1933aeeb8 100644 --- a/apis/clusters/v1beta1/structs.go +++ b/apis/clusters/v1beta1/structs.go @@ -32,29 +32,6 @@ type CloudProviderSettings struct { DisableSnapshotAutoExpiry bool `json:"disableSnapshotAutoExpiry,omitempty"` } -type DataCentre struct { - Name string `json:"name,omitempty"` - Region string `json:"region"` - CloudProvider string `json:"cloudProvider"` - ProviderAccountName string `json:"accountName,omitempty"` - CloudProviderSettings []*CloudProviderSettings `json:"cloudProviderSettings,omitempty"` - Network string `json:"network"` - NodeSize string `json:"nodeSize"` - NodesNumber int `json:"nodesNumber"` - Tags map[string]string `json:"tags,omitempty"` -} - -type DataCentreStatus struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - Status string `json:"status,omitempty"` - Nodes []*Node `json:"nodes,omitempty"` - NodesNumber int `json:"nodesNumber,omitempty"` - 
EncryptionKeyID string `json:"encryptionKeyId,omitempty"` - PrivateLink PrivateLinkStatuses `json:"privateLink,omitempty"` - ResizeOperations []*ResizeOperation `json:"resizeOperations,omitempty"` -} - type RestoreCDCConfig struct { CustomVPCSettings *RestoreCustomVPCSettings `json:"customVpcSettings"` RestoreMode string `json:"restoreMode"` @@ -66,46 +43,6 @@ type RestoreCustomVPCSettings struct { Network string `json:"network"` } -type Options struct { - DataNodeSize string `json:"dataNodeSize,omitempty"` - MasterNodeSize string `json:"masterNodeSize,omitempty"` - OpenSearchDashboardsNodeSize string `json:"openSearchDashboardsNodeSize,omitempty"` -} - -type Cluster struct { - // Name [ 3 .. 32 ] characters. - Name string `json:"name,omitempty"` - - Version string `json:"version,omitempty"` - - // The PCI compliance standards relate to the security of user data and transactional information. - // Can only be applied clusters provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch and Redis. - PCICompliance bool `json:"pciCompliance,omitempty"` - - PrivateNetworkCluster bool `json:"privateNetworkCluster,omitempty"` - - // Non-production clusters may receive lower priority support and reduced SLAs. - // Production tier is not available when using Developer class nodes. See SLA Tier for more information. - // Enum: "PRODUCTION" "NON_PRODUCTION". - SLATier string `json:"slaTier,omitempty"` - - TwoFactorDelete []*TwoFactorDelete `json:"twoFactorDelete,omitempty"` - - Description string `json:"description,omitempty"` -} - -type ClusterStatus struct { - ID string `json:"id,omitempty"` - State string `json:"state,omitempty"` - DataCentres []*DataCentreStatus `json:"dataCentres,omitempty"` - CDCID string `json:"cdcid,omitempty"` - TwoFactorDeleteEnabled bool `json:"twoFactorDeleteEnabled,omitempty"` - Options *Options `json:"options,omitempty"` - CurrentClusterOperationStatus string `json:"currentClusterOperationStatus,omitempty"` - MaintenanceEvents []*clusterresource.ClusteredMaintenanceEventStatus `json:"maintenanceEvents,omitempty"` - NodeCount string `json:"nodeCount,omitempty"` -} - type ClusteredMaintenanceEvent struct { InProgress []*clusterresource.MaintenanceEventStatus `json:"inProgress"` Past []*clusterresource.MaintenanceEventStatus `json:"past"` @@ -223,10 +160,6 @@ func privateLinksToInstAPI(p []*PrivateLink) []*models.PrivateLink { return links } -type PrivateLinkV1 struct { - IAMPrincipalARNs []string `json:"iamPrincipalARNs"` -} - type immutableCluster struct { Name string Version string @@ -295,30 +228,6 @@ type ReplaceOperation struct { Status string `json:"status,omitempty"` } -func (c *Cluster) IsEqual(cluster Cluster) bool { - return c.Name == cluster.Name && - c.Version == cluster.Version && - c.PCICompliance == cluster.PCICompliance && - c.PrivateNetworkCluster == cluster.PrivateNetworkCluster && - c.SLATier == cluster.SLATier && - c.Description == cluster.Description && - c.IsTwoFactorDeleteEqual(cluster.TwoFactorDelete) -} - -func (c *Cluster) IsTwoFactorDeleteEqual(tfds []*TwoFactorDelete) bool { - if len(c.TwoFactorDelete) != len(tfds) { - return false - } - - for i, tfd := range tfds { - if *tfd != *c.TwoFactorDelete[i] { - return false - } - } - - return true -} - func (tfd *TwoFactorDelete) ToInstAPI() *models.TwoFactorDelete { return &models.TwoFactorDelete{ ConfirmationPhoneNumber: tfd.Phone, @@ -326,81 +235,6 @@ func (tfd *TwoFactorDelete) ToInstAPI() *models.TwoFactorDelete { } } -func (c *Cluster) TwoFactorDeletesToInstAPI() (TFDs 
[]*models.TwoFactorDelete) { - for _, tfd := range c.TwoFactorDelete { - TFDs = append(TFDs, tfd.ToInstAPI()) - } - return -} - -func (c *Cluster) ClusterSettingsUpdateToInstAPI() *models.ClusterSettings { - settingsToAPI := &models.ClusterSettings{} - if c.TwoFactorDelete != nil { - iTFD := &models.TwoFactorDelete{} - for _, tfd := range c.TwoFactorDelete { - iTFD = tfd.ToInstAPI() - } - settingsToAPI.TwoFactorDelete = iTFD - } - settingsToAPI.Description = c.Description - - return settingsToAPI -} - -func (c *Cluster) TwoFactorDeleteToInstAPIv1() *models.TwoFactorDeleteV1 { - if len(c.TwoFactorDelete) == 0 { - return nil - } - - return &models.TwoFactorDeleteV1{ - DeleteVerifyEmail: c.TwoFactorDelete[0].Email, - DeleteVerifyPhone: c.TwoFactorDelete[0].Phone, - } -} - -func (dc *DataCentre) ToInstAPI() models.DataCentre { - providerSettings := dc.CloudProviderSettingsToInstAPI() - return models.DataCentre{ - Name: dc.Name, - Network: dc.Network, - NodeSize: dc.NodeSize, - NumberOfNodes: dc.NodesNumber, - AWSSettings: providerSettings.AWSSettings, - GCPSettings: providerSettings.GCPSettings, - AzureSettings: providerSettings.AzureSettings, - Tags: dc.TagsToInstAPI(), - CloudProvider: dc.CloudProvider, - Region: dc.Region, - ProviderAccountName: dc.ProviderAccountName, - } -} - -func (dc *DataCentre) CloudProviderSettingsToInstAPI() *models.CloudProviderSettings { - iSettings := &models.CloudProviderSettings{} - switch dc.CloudProvider { - case models.AWSVPC: - awsSettings := []*models.AWSSetting{} - for _, providerSettings := range dc.CloudProviderSettings { - awsSettings = append(awsSettings, providerSettings.AWSToInstAPI()) - } - iSettings.AWSSettings = awsSettings - case models.AZUREAZ: - azureSettings := []*models.AzureSetting{} - for _, providerSettings := range dc.CloudProviderSettings { - azureSettings = append(azureSettings, providerSettings.AzureToInstAPI()) - } - iSettings.AzureSettings = azureSettings - case models.GCP: - gcpSettings := []*models.GCPSetting{} - for _, providerSettings := range dc.CloudProviderSettings { - gcpSettings = append(gcpSettings, providerSettings.GCPToInstAPI()) - } - iSettings.GCPSettings = gcpSettings - } - - return iSettings -} - func (cps *CloudProviderSettings) AWSToInstAPI() *models.AWSSetting { return &models.AWSSetting{ EBSEncryptionKey: cps.DiskEncryptionKey, @@ -422,78 +256,6 @@ func (cps *CloudProviderSettings) GCPToInstAPI() *models.GCPSetting { } } -func (dc *DataCentre) TagsToInstAPI() (tags []*models.Tag) { - for key, value := range dc.Tags { - tags = append(tags, &models.Tag{ - Key: key, - Value: value, - }) - } - - return -} - -func (dc *DataCentre) IsEqual(iDC DataCentre) bool { - return iDC.Region == dc.Region && - iDC.CloudProvider == dc.CloudProvider && - iDC.ProviderAccountName == dc.ProviderAccountName && - dc.AreCloudProviderSettingsEqual(iDC.CloudProviderSettings) && - iDC.Network == dc.Network && - iDC.NodeSize == dc.NodeSize && - iDC.NodesNumber == dc.NodesNumber && - dc.AreTagsEqual(iDC.Tags) -} - -func (dc *DataCentre) AreCloudProviderSettingsEqual(settings []*CloudProviderSettings) bool { - if len(dc.CloudProviderSettings) != len(settings) { - return false - } - - for i, setting := range settings { - if *dc.CloudProviderSettings[i] != *setting { - return false - } - } - - return true -} - -func (dc *DataCentre) AreTagsEqual(tags map[string]string) bool { - if len(dc.Tags) != len(tags) { - return false - } - - for key, val := range tags { - if value, exists := dc.Tags[key]; !exists || value != val { - return false - } - } - 
- return true -} - -func (dc *DataCentre) SetDefaultValues() { - if dc.ProviderAccountName == "" { - dc.ProviderAccountName = models.DefaultAccountName - } -} - -func (cs *ClusterStatus) AreMaintenanceEventStatusesEqual( - iEventStatuses []*clusterresource.ClusteredMaintenanceEventStatus, -) bool { - if len(cs.MaintenanceEvents) != len(iEventStatuses) { - return false - } - - for i := range iEventStatuses { - if !areEventStatusesEqual(iEventStatuses[i], cs.MaintenanceEvents[i]) { - return false - } - } - - return true -} - func areEventStatusesEqual(a, b *clusterresource.ClusteredMaintenanceEventStatus) bool { if len(a.Past) != len(b.Past) || len(a.InProgress) != len(b.InProgress) || @@ -542,152 +304,6 @@ func areClusteredMaintenanceEventStatusEqual(a, b *clusterresource.MaintenanceEv a.Outcome == b.Outcome } -func (cs *ClusterStatus) DCFromInstAPI(iDC models.DataCentre) *DataCentreStatus { - return &DataCentreStatus{ - Name: iDC.Name, - ID: iDC.ID, - Status: iDC.Status, - Nodes: cs.NodesFromInstAPI(iDC.Nodes), - NodesNumber: iDC.NumberOfNodes, - } -} - -func (c *Cluster) TwoFactorDeleteFromInstAPI(iTFDs []*models.TwoFactorDelete) (tfd []*TwoFactorDelete) { - for _, iTFD := range iTFDs { - tfd = append(tfd, &TwoFactorDelete{ - Email: iTFD.ConfirmationEmail, - Phone: iTFD.ConfirmationPhoneNumber, - }) - } - return -} - -func (c *Cluster) DCFromInstAPI(iDC models.DataCentre) DataCentre { - return DataCentre{ - Name: iDC.Name, - Region: iDC.Region, - CloudProvider: iDC.CloudProvider, - ProviderAccountName: iDC.ProviderAccountName, - CloudProviderSettings: c.CloudProviderSettingsFromInstAPI(iDC), - Network: iDC.Network, - NodeSize: iDC.NodeSize, - NodesNumber: iDC.NumberOfNodes, - Tags: c.TagsFromInstAPI(iDC.Tags), - } -} - -func (c *Cluster) TagsFromInstAPI(iTags []*models.Tag) map[string]string { - newTags := map[string]string{} - for _, iTag := range iTags { - newTags[iTag.Key] = iTag.Value - } - return newTags -} - -func (c *Cluster) ClusterSettingsNeedUpdate(iCluster Cluster) bool { - return len(c.TwoFactorDelete) != 0 && len(iCluster.TwoFactorDelete) == 0 || - c.Description != iCluster.Description -} - -func (c *Cluster) CloudProviderSettingsFromInstAPI(iDC models.DataCentre) (settings []*CloudProviderSettings) { - if isCloudProviderSettingsEmpty(iDC) { - return nil - } - - switch iDC.CloudProvider { - case models.AWSVPC: - for _, awsSetting := range iDC.AWSSettings { - settings = append(settings, &CloudProviderSettings{ - CustomVirtualNetworkID: awsSetting.CustomVirtualNetworkID, - DiskEncryptionKey: awsSetting.EBSEncryptionKey, - BackupBucket: awsSetting.BackupBucket, - }) - } - case models.GCP: - for _, gcpSetting := range iDC.GCPSettings { - settings = append(settings, &CloudProviderSettings{ - CustomVirtualNetworkID: gcpSetting.CustomVirtualNetworkID, - DisableSnapshotAutoExpiry: gcpSetting.DisableSnapshotAutoExpiry, - }) - } - case models.AZUREAZ: - for _, azureSetting := range iDC.AzureSettings { - settings = append(settings, &CloudProviderSettings{ - ResourceGroup: azureSetting.ResourceGroup, - }) - } - } - return -} - -func isCloudProviderSettingsEmpty(iDC models.DataCentre) bool { - var empty bool - - for i := range iDC.AWSSettings { - empty = *iDC.AWSSettings[i] == models.AWSSetting{} - if !empty { - return false - } - } - - for i := range iDC.AzureSettings { - empty = *iDC.AzureSettings[i] == models.AzureSetting{} - if !empty { - return false - } - } - - for i := range iDC.GCPSettings { - empty = *iDC.GCPSettings[i] == models.GCPSetting{} - if !empty { - return false - } - } 
- - return true -} - -func (cs *ClusterStatus) NodesFromInstAPI(iNodes []*models.Node) (nodes []*Node) { - for _, iNode := range iNodes { - nodes = append(nodes, &Node{ - ID: iNode.ID, - Size: iNode.Size, - PublicAddress: iNode.PublicAddress, - PrivateAddress: iNode.PrivateAddress, - Status: iNode.Status, - Roles: iNode.Roles, - Rack: iNode.Rack, - }) - } - return nodes -} - -func (cs *ClusterStatus) NodesFromInstAPIv1(iNodes []*models.NodeStatusV1) (nodes []*Node) { - for _, iNode := range iNodes { - nodes = append(nodes, &Node{ - ID: iNode.ID, - Size: iNode.Size, - PublicAddress: iNode.PublicAddress, - PrivateAddress: iNode.PrivateAddress, - Status: iNode.NodeStatus, - Rack: iNode.Rack, - }) - } - return nodes -} - -func (cs *ClusterStatus) PrivateLinkStatusesEqual(iStatus *ClusterStatus) bool { - for _, iDC := range iStatus.DataCentres { - for _, k8sDC := range cs.DataCentres { - if !iDC.PrivateLink.Equal(k8sDC.PrivateLink) { - return false - } - } - } - - return true -} - // +kubebuilder:object:generate:=false type Reference = apiextensions.ObjectReference diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index f27248f82..91ed82011 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -32,77 +32,6 @@ import ( "github.com/instaclustr/operator/pkg/validation" ) -func (c *Cluster) ValidateCreation() error { - clusterNameMatched, err := regexp.Match(models.ClusterNameRegExp, []byte(c.Name)) - if !clusterNameMatched || err != nil { - return fmt.Errorf("cluster name should have lenght from 3 to 32 symbols and fit pattern: %s", - models.ClusterNameRegExp) - } - - if len(c.TwoFactorDelete) > 1 { - return fmt.Errorf("two factor delete should not have more than 1 item") - } - - if !validation.Contains(c.SLATier, models.SLATiers) { - return fmt.Errorf("cluster SLATier %s is unavailable, available values: %v", - c.SLATier, models.SLATiers) - } - - return nil -} - -func (dc *DataCentre) ValidateCreation() error { - if !validation.Contains(dc.CloudProvider, models.CloudProviders) { - return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available values: %v", - dc.CloudProvider, dc.Name, models.CloudProviders) - } - - switch dc.CloudProvider { - case models.AWSVPC: - if !validation.Contains(dc.Region, models.AWSRegions) { - return fmt.Errorf("AWS Region: %s is unavailable, available regions: %v", - dc.Region, models.AWSRegions) - } - case models.AZUREAZ: - if !validation.Contains(dc.Region, models.AzureRegions) { - return fmt.Errorf("azure Region: %s is unavailable, available regions: %v", - dc.Region, models.AzureRegions) - } - case models.GCP: - if !validation.Contains(dc.Region, models.GCPRegions) { - return fmt.Errorf("GCP Region: %s is unavailable, available regions: %v", - dc.Region, models.GCPRegions) - } - case models.ONPREMISES: - if dc.Region != models.CLIENTDC { - return fmt.Errorf("ONPREMISES Region: %s is unavailable, available regions: %v", - dc.Region, models.CLIENTDC) - } - } - - if dc.ProviderAccountName == models.DefaultAccountName && len(dc.CloudProviderSettings) != 0 { - return fmt.Errorf("cloud provider settings can be used only with RIYOA accounts") - } - - if len(dc.CloudProviderSettings) > 1 { - return fmt.Errorf("cloud provider settings should not have more than 1 item") - } - - for _, cp := range dc.CloudProviderSettings { - err := cp.ValidateCreation() - if err != nil { - return err - } - } - - networkMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(dc.Network)) - 
if !networkMatched || err != nil { - return fmt.Errorf("the provided CIDR: %s must contain four dot separated parts and form the Private IP address. All bits in the host part of the CIDR must be 0. Suffix must be between 16-28. %v", dc.Network, err) - } - - return nil -} - func (ops *OnPremisesSpec) ValidateCreation() error { if ops.StorageClassName == "" || ops.DataDiskSize == "" || ops.OSDiskSize == "" || ops.NodeCPU == 0 || ops.NodeMemory == "" || ops.OSImageURL == "" || ops.CloudInitScriptRef == nil { @@ -143,20 +72,6 @@ func (ops *OnPremisesSpec) ValidateSSHGatewayCreation() error { return nil } -func (dc *DataCentre) validateImmutableCloudProviderSettingsUpdate(oldSettings []*CloudProviderSettings) error { - if len(oldSettings) != len(dc.CloudProviderSettings) { - return models.ErrImmutableCloudProviderSettings - } - - for i, newProviderSettings := range dc.CloudProviderSettings { - if *newProviderSettings != *oldSettings[i] { - return models.ErrImmutableCloudProviderSettings - } - } - - return nil -} - func (cps *CloudProviderSettings) ValidateCreation() error { if (cps.ResourceGroup != "" && cps.DiskEncryptionKey != "") || (cps.ResourceGroup != "" && cps.CustomVirtualNetworkID != "") { @@ -279,20 +194,7 @@ func validateSingleConcurrentResize(concurrentResizes int) error { return nil } -func (dc *DataCentre) ValidateOnPremisesCreation() error { - if dc.CloudProvider != models.ONPREMISES { - return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available value: %s", - dc.CloudProvider, dc.Name, models.ONPREMISES) - } - - if dc.Region != models.CLIENTDC { - return fmt.Errorf("region %s is unavailable for data centre: %s, available value: %s", - dc.Region, dc.Name, models.CLIENTDC) - } - - return nil -} - +//nolint:unused func ContainsKubeVirtAddon(ctx context.Context, client client.Client) (bool, error) { namespaces := &k8scorev1.NamespaceList{} err := client.List(ctx, namespaces) diff --git a/apis/clusters/v1beta1/validation_test.go b/apis/clusters/v1beta1/validation_test.go index fe5c64e80..a912a1941 100644 --- a/apis/clusters/v1beta1/validation_test.go +++ b/apis/clusters/v1beta1/validation_test.go @@ -6,194 +6,6 @@ import ( "github.com/instaclustr/operator/pkg/models" ) -func TestCluster_ValidateCreation(t *testing.T) { - type fields struct { - Name string - Version string - PCICompliance bool - PrivateNetworkCluster bool - SLATier string - TwoFactorDelete []*TwoFactorDelete - Description string - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "empty cluster name", - fields: fields{ - Name: "", - }, - wantErr: true, - }, - { - name: "more than one two factor delete", - fields: fields{ - Name: "test", - TwoFactorDelete: []*TwoFactorDelete{ - { - Email: "test@mail.com", - Phone: "12345", - }, { - Email: "test@mail.com", - Phone: "12345", - }, - }, - }, - wantErr: true, - }, - { - name: "unsupported SLAtier", - fields: fields{ - Name: "test", - SLATier: "test", - }, - wantErr: true, - }, - { - name: "valid cluster", - fields: fields{ - Name: "test", - SLATier: "NON_PRODUCTION", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Cluster{ - Name: tt.fields.Name, - Version: tt.fields.Version, - PCICompliance: tt.fields.PCICompliance, - PrivateNetworkCluster: tt.fields.PrivateNetworkCluster, - SLATier: tt.fields.SLATier, - TwoFactorDelete: tt.fields.TwoFactorDelete, - Description: tt.fields.Description, - } - if err := c.ValidateCreation(); (err != nil) != tt.wantErr { - 
t.Errorf("ValidateCreation() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestDataCentre_ValidateCreation(t *testing.T) { - type fields struct { - Name string - Region string - CloudProvider string - ProviderAccountName string - CloudProviderSettings []*CloudProviderSettings - Network string - NodeSize string - NodesNumber int - Tags map[string]string - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "unavailable cloud provider", - fields: fields{ - CloudProvider: "some unavailable cloud provider", - }, - wantErr: true, - }, - { - name: "unavailable region for AWS cloud provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.AWSVPC, - }, - wantErr: true, - }, - { - name: "unavailable region for Azure cloud provider", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AZUREAZ, - }, - wantErr: true, - }, - { - name: "unavailable region for GCP cloud provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.GCP, - }, - wantErr: true, - }, - { - name: "unavailable region for ONPREMISES cloud provider", - fields: fields{ - Region: models.AzureRegions[0], - CloudProvider: models.ONPREMISES, - }, - wantErr: true, - }, - { - name: "cloud provider settings on not RIYOA account", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - ProviderAccountName: models.DefaultAccountName, - CloudProviderSettings: []*CloudProviderSettings{{}}, - }, - wantErr: true, - }, - { - name: "more than one cloud provider settings", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - ProviderAccountName: "custom", - CloudProviderSettings: []*CloudProviderSettings{{}, {}}, - }, - wantErr: true, - }, - { - name: "invalid network", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - Network: "test", - }, - wantErr: true, - }, - { - name: "valid DC", - fields: fields{ - Region: models.AWSRegions[0], - CloudProvider: models.AWSVPC, - Network: "172.16.0.0/19", - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dc := &DataCentre{ - Name: tt.fields.Name, - Region: tt.fields.Region, - CloudProvider: tt.fields.CloudProvider, - ProviderAccountName: tt.fields.ProviderAccountName, - CloudProviderSettings: tt.fields.CloudProviderSettings, - Network: tt.fields.Network, - NodeSize: tt.fields.NodeSize, - NodesNumber: tt.fields.NodesNumber, - Tags: tt.fields.Tags, - } - if err := dc.ValidateCreation(); (err != nil) != tt.wantErr { - t.Errorf("ValidateCreation() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestCloudProviderSettings_ValidateCreation(t *testing.T) { type fields struct { CustomVirtualNetworkID string @@ -941,76 +753,6 @@ func TestGenericDataCentreSpec_hasCloudProviderSettings(t *testing.T) { } } -func TestDataCentre_validateImmutableCloudProviderSettingsUpdate(t *testing.T) { - type fields struct { - Name string - Region string - CloudProvider string - ProviderAccountName string - CloudProviderSettings []*CloudProviderSettings - Network string - NodeSize string - NodesNumber int - Tags map[string]string - } - type args struct { - oldSettings []*CloudProviderSettings - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - { - name: "different len of the CloudProviderSettings", - fields: fields{ - CloudProviderSettings: []*CloudProviderSettings{{}, {}}, - }, - args: args{oldSettings: 
[]*CloudProviderSettings{{}}}, - wantErr: true, - }, - { - name: "different CloudProviderSettings", - fields: fields{CloudProviderSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "new", - }}}, - args: args{oldSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - wantErr: true, - }, - { - name: "unchanged CloudProviderSettings", - fields: fields{CloudProviderSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - args: args{oldSettings: []*CloudProviderSettings{{ - CustomVirtualNetworkID: "test", - }}}, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - dc := &DataCentre{ - Name: tt.fields.Name, - Region: tt.fields.Region, - CloudProvider: tt.fields.CloudProvider, - ProviderAccountName: tt.fields.ProviderAccountName, - CloudProviderSettings: tt.fields.CloudProviderSettings, - Network: tt.fields.Network, - NodeSize: tt.fields.NodeSize, - NodesNumber: tt.fields.NodesNumber, - Tags: tt.fields.Tags, - } - if err := dc.validateImmutableCloudProviderSettingsUpdate(tt.args.oldSettings); (err != nil) != tt.wantErr { - t.Errorf("validateImmutableCloudProviderSettingsUpdate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestGenericClusterSpec_ValidateCreation(t *testing.T) { type fields struct { Name string diff --git a/apis/clusters/v1beta1/zookeeper_types.go b/apis/clusters/v1beta1/zookeeper_types.go index b5b7d1a9c..54c80d4a6 100644 --- a/apis/clusters/v1beta1/zookeeper_types.go +++ b/apis/clusters/v1beta1/zookeeper_types.go @@ -17,32 +17,41 @@ limitations under the License. package v1beta1 import ( - "encoding/json" "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/instaclustr/operator/pkg/models" + "github.com/instaclustr/operator/pkg/utils/slices" ) type ZookeeperDataCentre struct { - DataCentre `json:",inline"` + GenericDataCentreSpec `json:",inline"` + + NodesNumber int `json:"nodesNumber"` + NodeSize string `json:"nodeSize"` ClientToServerEncryption bool `json:"clientToServerEncryption"` - EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` EnforceAuthEnabled bool `json:"enforceAuthEnabled,omitempty"` + EnforceAuthSchemes []string `json:"enforceAuthSchemes,omitempty"` } // ZookeeperSpec defines the desired state of Zookeeper type ZookeeperSpec struct { - Cluster `json:",inline"` - DataCentres []*ZookeeperDataCentre `json:"dataCentres"` + GenericClusterSpec `json:",inline"` + DataCentres []*ZookeeperDataCentre `json:"dataCentres"` } // ZookeeperStatus defines the observed state of Zookeeper type ZookeeperStatus struct { - ClusterStatus `json:",inline"` - DefaultUserSecretRef *Reference `json:"defaultUserSecretRef,omitempty"` + GenericStatus `json:",inline"` + DataCentres []*ZookeeperDataCentreStatus `json:"dataCentres,omitempty"` + DefaultUserSecretRef *Reference `json:"defaultUserSecretRef,omitempty"` +} + +type ZookeeperDataCentreStatus struct { + GenericDataCentreStatus `json:",inline"` + Nodes []*Node `json:"nodes"` } //+kubebuilder:object:root=true @@ -85,19 +94,9 @@ func (z *Zookeeper) NewPatch() client.Patch { return client.MergeFrom(old) } -func (z *Zookeeper) FromInstAPI(iData []byte) (*Zookeeper, error) { - iZook := &models.ZookeeperCluster{} - err := json.Unmarshal(iData, iZook) - if err != nil { - return nil, err - } - - return &Zookeeper{ - TypeMeta: z.TypeMeta, - ObjectMeta: z.ObjectMeta, - Spec: z.Spec.FromInstAPI(iZook), - Status: z.Status.FromInstAPI(iZook), - }, nil +func (z 
*Zookeeper) FromInstAPI(instaModel *models.ZookeeperCluster) { + z.Spec.FromInstAPI(instaModel) + z.Status.FromInstAPI(instaModel) } func (z *Zookeeper) GetDataCentreID(cdcName string) string { @@ -116,31 +115,15 @@ func (z *Zookeeper) GetClusterID() string { return z.Status.ID } -func (zs *ZookeeperSpec) FromInstAPI(iZook *models.ZookeeperCluster) ZookeeperSpec { - return ZookeeperSpec{ - Cluster: Cluster{ - Name: iZook.Name, - Version: iZook.ZookeeperVersion, - Description: iZook.Description, - PrivateNetworkCluster: iZook.PrivateNetworkCluster, - SLATier: iZook.SLATier, - TwoFactorDelete: zs.Cluster.TwoFactorDeleteFromInstAPI(iZook.TwoFactorDelete), - }, - DataCentres: zs.DCsFromInstAPI(iZook.DataCentres), - } +func (zs *ZookeeperSpec) FromInstAPI(instaModel *models.ZookeeperCluster) { + zs.GenericClusterSpec.FromInstAPI(&instaModel.GenericClusterFields, instaModel.ZookeeperVersion) + zs.DCsFromInstAPI(instaModel.DataCentres) } -func (zs *ZookeeperStatus) FromInstAPI(iZook *models.ZookeeperCluster) ZookeeperStatus { - return ZookeeperStatus{ - ClusterStatus: ClusterStatus{ - ID: iZook.ID, - State: iZook.Status, - DataCentres: zs.DCsFromInstAPI(iZook.DataCentres), - CurrentClusterOperationStatus: iZook.CurrentClusterOperationStatus, - MaintenanceEvents: zs.MaintenanceEvents, - NodeCount: zs.GetNodeCount(iZook.DataCentres), - }, - } +func (zs *ZookeeperStatus) FromInstAPI(instaModel *models.ZookeeperCluster) { + zs.GenericStatus.FromInstAPI(&instaModel.GenericClusterFields) + zs.DCsFromInstAPI(instaModel.DataCentres) + zs.NodeCount = zs.GetNodeCount(instaModel.DataCentres) } func (zs *ZookeeperStatus) GetNodeCount(dcs []*models.ZookeeperDataCentre) string { @@ -156,34 +139,31 @@ func (zs *ZookeeperStatus) GetNodeCount(dcs []*models.ZookeeperDataCentre) strin return fmt.Sprintf("%v/%v", running, total) } -func (zs *ZookeeperSpec) DCsFromInstAPI(iDCs []*models.ZookeeperDataCentre) (dcs []*ZookeeperDataCentre) { - for _, iDC := range iDCs { - dcs = append(dcs, &ZookeeperDataCentre{ - DataCentre: zs.Cluster.DCFromInstAPI(iDC.DataCentre), - ClientToServerEncryption: iDC.ClientToServerEncryption, - EnforceAuthSchemes: iDC.EnforceAuthSchemes, - EnforceAuthEnabled: iDC.EnforceAuthEnabled, - }) +func (zs *ZookeeperSpec) DCsFromInstAPI(instaModels []*models.ZookeeperDataCentre) { + dcs := make([]*ZookeeperDataCentre, len(instaModels)) + for i, instaModel := range instaModels { + dc := &ZookeeperDataCentre{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } - return + zs.DataCentres = dcs } -func (zs *ZookeeperStatus) DCsFromInstAPI(iDCs []*models.ZookeeperDataCentre) (dcs []*DataCentreStatus) { - for _, iDC := range iDCs { - dcs = append(dcs, zs.ClusterStatus.DCFromInstAPI(iDC.DataCentre)) +func (zs *ZookeeperStatus) DCsFromInstAPI(instaModels []*models.ZookeeperDataCentre) { + dcs := make([]*ZookeeperDataCentreStatus, len(instaModels)) + for i, instaModel := range instaModels { + dc := &ZookeeperDataCentreStatus{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } - return + zs.DataCentres = dcs } func (zs *ZookeeperSpec) ToInstAPI() *models.ZookeeperCluster { return &models.ZookeeperCluster{ - Name: zs.Name, - ZookeeperVersion: zs.Version, - PrivateNetworkCluster: zs.PrivateNetworkCluster, - SLATier: zs.SLATier, - TwoFactorDelete: zs.Cluster.TwoFactorDeletesToInstAPI(), - DataCentres: zs.DCsToInstAPI(), - Description: zs.Description, + GenericClusterFields: zs.GenericClusterSpec.ToInstAPI(), + ZookeeperVersion: zs.Version, + DataCentres: zs.DCsToInstAPI(), } } @@ -196,10 +176,12 @@ func (zs *ZookeeperSpec) 
DCsToInstAPI() (dcs []*models.ZookeeperDataCentre) { func (zdc *ZookeeperDataCentre) ToInstAPI() *models.ZookeeperDataCentre { return &models.ZookeeperDataCentre{ - DataCentre: zdc.DataCentre.ToInstAPI(), + GenericDataCentreFields: zdc.GenericDataCentreSpec.ToInstAPI(), ClientToServerEncryption: zdc.ClientToServerEncryption, EnforceAuthSchemes: zdc.EnforceAuthSchemes, EnforceAuthEnabled: zdc.EnforceAuthEnabled, + NumberOfNodes: zdc.NodesNumber, + NodeSize: zdc.NodeSize, } } @@ -208,26 +190,79 @@ func (z *Zookeeper) GetSpec() ZookeeperSpec { return z.Spec } func (z *Zookeeper) IsSpecEqual(spec ZookeeperSpec) bool { return z.Spec.IsEqual(spec) } func (a *ZookeeperSpec) IsEqual(b ZookeeperSpec) bool { - return a.Cluster.IsEqual(b.Cluster) && - a.areDCsEqual(b.DataCentres) + return a.GenericClusterSpec.Equals(&b.GenericClusterSpec) && + a.DCsEqual(b.DataCentres) } -func (rs *ZookeeperSpec) areDCsEqual(b []*ZookeeperDataCentre) bool { - a := rs.DataCentres - if len(a) != len(b) { +func (rs *ZookeeperSpec) DCsEqual(o []*ZookeeperDataCentre) bool { + if len(rs.DataCentres) != len(o) { return false } - for i := range b { - if a[i].Name != b[i].Name { - continue + m := map[string]*ZookeeperDataCentre{} + for _, dc := range rs.DataCentres { + m[dc.Name] = dc + } + + for _, iDC := range o { + dc, ok := m[iDC.Name] + if !ok || !dc.Equals(iDC) { + return false } + } + + return true +} - if !a[i].DataCentre.IsEqual(b[i].DataCentre) || - a[i].ClientToServerEncryption != b[i].ClientToServerEncryption { +func (zdc *ZookeeperDataCentre) FromInstAPI(instaModel *models.ZookeeperDataCentre) { + zdc.GenericDataCentreSpec.FromInstAPI(&instaModel.GenericDataCentreFields) + zdc.NodeSize = instaModel.NodeSize + zdc.NodesNumber = instaModel.NumberOfNodes + zdc.ClientToServerEncryption = instaModel.ClientToServerEncryption + zdc.EnforceAuthEnabled = instaModel.EnforceAuthEnabled + zdc.EnforceAuthSchemes = instaModel.EnforceAuthSchemes +} + +func (s *ZookeeperDataCentreStatus) FromInstAPI(instaModel *models.ZookeeperDataCentre) { + s.GenericDataCentreStatus.FromInstAPI(&instaModel.GenericDataCentreFields) + s.Nodes = nodesFromInstAPI(instaModel.Nodes) +} + +func (zdc *ZookeeperDataCentre) Equals(o *ZookeeperDataCentre) bool { + return zdc.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && + zdc.NodesNumber == o.NodesNumber && + zdc.NodeSize == o.NodeSize && + zdc.ClientToServerEncryption == o.ClientToServerEncryption && + zdc.EnforceAuthEnabled == o.EnforceAuthEnabled && + slices.Equals(zdc.EnforceAuthSchemes, o.EnforceAuthSchemes) +} + +func (zs *ZookeeperStatus) Equals(o *ZookeeperStatus) bool { + return zs.GenericStatus.Equals(&o.GenericStatus) && + zs.DCsEquals(o.DataCentres) +} + +func (zs *ZookeeperStatus) DCsEquals(o []*ZookeeperDataCentreStatus) bool { + if len(zs.DataCentres) != len(o) { + return false + } + + m := map[string]*ZookeeperDataCentreStatus{} + for _, dc := range zs.DataCentres { + m[dc.Name] = dc + } + + for _, iDC := range o { + dc, ok := m[iDC.Name] + if !ok || !dc.Equals(iDC) { return false } } return true } + +func (s *ZookeeperDataCentreStatus) Equals(o *ZookeeperDataCentreStatus) bool { + return s.GenericDataCentreStatus.Equals(&o.GenericDataCentreStatus) && + nodesEqual(s.Nodes, o.Nodes) +} diff --git a/apis/clusters/v1beta1/zookeeper_webhook.go b/apis/clusters/v1beta1/zookeeper_webhook.go index c12fb67db..a4c67fe18 100644 --- a/apis/clusters/v1beta1/zookeeper_webhook.go +++ b/apis/clusters/v1beta1/zookeeper_webhook.go @@ -61,10 +61,6 @@ func (z *Zookeeper) Default() { 
models.ResourceStateAnnotation: "", }) } - - for _, dataCentre := range z.Spec.DataCentres { - dataCentre.SetDefaultValues() - } } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. @@ -86,7 +82,7 @@ func (zv *zookeeperValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return err } - err = z.Spec.Cluster.ValidateCreation() + err = z.Spec.GenericClusterSpec.ValidateCreation() if err != nil { return err } @@ -107,7 +103,7 @@ func (zv *zookeeperValidator) ValidateCreate(ctx context.Context, obj runtime.Ob } for _, dc := range z.Spec.DataCentres { - err = dc.DataCentre.ValidateCreation() + err = dc.GenericDataCentreSpec.validateCreation() if err != nil { return err } @@ -127,17 +123,21 @@ func (zv *zookeeperValidator) ValidateUpdate(ctx context.Context, old runtime.Ob return fmt.Errorf("cannot assert object %v to zookeeper", new.GetObjectKind()) } - zookeeperlog.Info("validate update", "name", newZookeeper.Name) - - if newZookeeper.Status.ID == "" { - return zv.ValidateCreate(ctx, newZookeeper) + if newZookeeper.Annotations[models.ResourceStateAnnotation] == models.SyncingEvent { + return nil } if newZookeeper.Annotations[models.ExternalChangesAnnotation] == models.True { return nil } - if newZookeeper.Generation != oldZookeeper.Generation && !oldZookeeper.Spec.ClusterSettingsNeedUpdate(newZookeeper.Spec.Cluster) { + if newZookeeper.Status.ID == "" { + return zv.ValidateCreate(ctx, newZookeeper) + } + + zookeeperlog.Info("validate update", "name", newZookeeper.Name) + + if newZookeeper.Generation != oldZookeeper.Generation && !oldZookeeper.Spec.ClusterSettingsNeedUpdate(&newZookeeper.Spec.GenericClusterSpec) { return fmt.Errorf("update is not allowed") } diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index 0fc301951..ca231d5f8 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -705,32 +705,6 @@ func (in *CloudProviderSettings) DeepCopy() *CloudProviderSettings { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Cluster) DeepCopyInto(out *Cluster) { - *out = *in - if in.TwoFactorDelete != nil { - in, out := &in.TwoFactorDelete, &out.TwoFactorDelete - *out = make([]*TwoFactorDelete, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(TwoFactorDelete) - **out = **in - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. -func (in *Cluster) DeepCopy() *Cluster { - if in == nil { - return nil - } - out := new(Cluster) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterManagerNodes) DeepCopyInto(out *ClusterManagerNodes) { *out = *in @@ -746,48 +720,6 @@ func (in *ClusterManagerNodes) DeepCopy() *ClusterManagerNodes { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { - *out = *in - if in.DataCentres != nil { - in, out := &in.DataCentres, &out.DataCentres - *out = make([]*DataCentreStatus, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(DataCentreStatus) - (*in).DeepCopyInto(*out) - } - } - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = new(Options) - **out = **in - } - if in.MaintenanceEvents != nil { - in, out := &in.MaintenanceEvents, &out.MaintenanceEvents - *out = make([]*clusterresourcesv1beta1.ClusteredMaintenanceEventStatus, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(clusterresourcesv1beta1.ClusteredMaintenanceEventStatus) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. -func (in *ClusterStatus) DeepCopy() *ClusterStatus { - if in == nil { - return nil - } - out := new(ClusterStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusteredMaintenanceEvent) DeepCopyInto(out *ClusteredMaintenanceEvent) { *out = *in @@ -884,87 +816,6 @@ func (in *CustomConnectors) DeepCopy() *CustomConnectors { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataCentre) DeepCopyInto(out *DataCentre) { - *out = *in - if in.CloudProviderSettings != nil { - in, out := &in.CloudProviderSettings, &out.CloudProviderSettings - *out = make([]*CloudProviderSettings, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(CloudProviderSettings) - **out = **in - } - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCentre. -func (in *DataCentre) DeepCopy() *DataCentre { - if in == nil { - return nil - } - out := new(DataCentre) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataCentreStatus) DeepCopyInto(out *DataCentreStatus) { - *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]*Node, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Node) - (*in).DeepCopyInto(*out) - } - } - } - if in.PrivateLink != nil { - in, out := &in.PrivateLink, &out.PrivateLink - *out = make(PrivateLinkStatuses, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(privateLinkStatus) - **out = **in - } - } - } - if in.ResizeOperations != nil { - in, out := &in.ResizeOperations, &out.ResizeOperations - *out = make([]*ResizeOperation, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(ResizeOperation) - (*in).DeepCopyInto(*out) - } - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataCentreStatus. 
-func (in *DataCentreStatus) DeepCopy() *DataCentreStatus { - if in == nil { - return nil - } - out := new(DataCentreStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DebeziumCassandraSpec) DeepCopyInto(out *DebeziumCassandraSpec) { *out = *in @@ -2120,21 +1971,6 @@ func (in *OpenSearchStatus) DeepCopy() *OpenSearchStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Options) DeepCopyInto(out *Options) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Options. -func (in *Options) DeepCopy() *Options { - if in == nil { - return nil - } - out := new(Options) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PackagedProvisioning) DeepCopyInto(out *PackagedProvisioning) { *out = *in @@ -2558,26 +2394,6 @@ func (in PrivateLinkStatuses) DeepCopy() PrivateLinkStatuses { return *out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrivateLinkV1) DeepCopyInto(out *PrivateLinkV1) { - *out = *in - if in.IAMPrincipalARNs != nil { - in, out := &in.IAMPrincipalARNs, &out.IAMPrincipalARNs - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkV1. -func (in *PrivateLinkV1) DeepCopy() *PrivateLinkV1 { - if in == nil { - return nil - } - out := new(PrivateLinkV1) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Redis) DeepCopyInto(out *Redis) { *out = *in @@ -3109,7 +2925,7 @@ func (in *Zookeeper) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperDataCentre) DeepCopyInto(out *ZookeeperDataCentre) { *out = *in - in.DataCentre.DeepCopyInto(&out.DataCentre) + in.GenericDataCentreSpec.DeepCopyInto(&out.GenericDataCentreSpec) if in.EnforceAuthSchemes != nil { in, out := &in.EnforceAuthSchemes, &out.EnforceAuthSchemes *out = make([]string, len(*in)) @@ -3127,6 +2943,33 @@ func (in *ZookeeperDataCentre) DeepCopy() *ZookeeperDataCentre { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ZookeeperDataCentreStatus) DeepCopyInto(out *ZookeeperDataCentreStatus) { + *out = *in + in.GenericDataCentreStatus.DeepCopyInto(&out.GenericDataCentreStatus) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZookeeperDataCentreStatus. +func (in *ZookeeperDataCentreStatus) DeepCopy() *ZookeeperDataCentreStatus { + if in == nil { + return nil + } + out := new(ZookeeperDataCentreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ZookeeperList) DeepCopyInto(out *ZookeeperList) { *out = *in @@ -3162,7 +3005,7 @@ func (in *ZookeeperList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperSpec) DeepCopyInto(out *ZookeeperSpec) { *out = *in - in.Cluster.DeepCopyInto(&out.Cluster) + in.GenericClusterSpec.DeepCopyInto(&out.GenericClusterSpec) if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*ZookeeperDataCentre, len(*in)) @@ -3189,7 +3032,18 @@ func (in *ZookeeperSpec) DeepCopy() *ZookeeperSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ZookeeperStatus) DeepCopyInto(out *ZookeeperStatus) { *out = *in - in.ClusterStatus.DeepCopyInto(&out.ClusterStatus) + in.GenericStatus.DeepCopyInto(&out.GenericStatus) + if in.DataCentres != nil { + in, out := &in.DataCentres, &out.DataCentres + *out = make([]*ZookeeperDataCentreStatus, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ZookeeperDataCentreStatus) + (*in).DeepCopyInto(*out) + } + } + } if in.DefaultUserSecretRef != nil { in, out := &in.DefaultUserSecretRef, &out.DefaultUserSecretRef *out = new(apiextensions.ObjectReference) diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 08c1a7299..02a9d14ed 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -179,7 +179,7 @@ spec: type: string nodeSize: type: string - numberOfNodes: + nodesNumber: type: integer privateLink: items: @@ -209,7 +209,7 @@ spec: - name - network - nodeSize - - numberOfNodes + - nodesNumber - region type: object maxItems: 1 @@ -431,8 +431,6 @@ spec: type: string type: object type: array - numberOfNodes: - type: integer privateLink: items: properties: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml index 1efa3c34b..734dc8342 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml @@ -241,7 +241,7 @@ spec: type: string nodeSize: type: string - numberOfNodes: + nodesNumber: type: integer region: description: Region of the Data Centre. 
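Editor's note: the numberOfNodes -> nodesNumber renames in these CRD schemas track a field rename in the v1beta1 Go API types; the kafkaconnect_controller_test.go hunk later in this diff, where Spec.DataCentres[0].NumberOfNodes becomes NodesNumber, confirms the Go side. A minimal sketch of the struct behind the schema change, with every other field elided and the exact layout assumed rather than quoted from this patch:

	type KafkaConnectDataCentre struct {
		// ...other data centre fields elided...

		// Previously: NumberOfNodes int `json:"numberOfNodes"`.
		// Renaming the field and its json tag is what drives the
		// regenerated CRD schema and sample manifests in this diff.
		NodesNumber int `json:"nodesNumber"`
	}

Since controller-gen derives the OpenAPI schema and required-field lists from the Go types and their json tags, the property renames across these YAML files all follow mechanically from that one struct change.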
@@ -262,7 +262,7 @@ spec: - name - network - nodeSize - - numberOfNodes + - nodesNumber - region - replicationFactor type: object diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index b23ec1e80..c9fb1b389 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -183,7 +183,7 @@ spec: type: string nodeSize: type: string - numberOfNodes: + nodesNumber: type: integer pgBouncer: items: @@ -216,7 +216,7 @@ spec: - name - network - nodeSize - - numberOfNodes + - nodesNumber - region type: object type: array @@ -362,8 +362,6 @@ spec: type: string type: object type: array - numberOfNodes: - type: integer resizeOperations: items: properties: @@ -422,7 +420,6 @@ spec: type: string required: - nodes - - numberOfNodes type: object type: array id: diff --git a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml index d0f887e7b..4ee17c34b 100644 --- a/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml +++ b/config/crd/bases/clusters.instaclustr.com_zookeepers.yaml @@ -55,49 +55,135 @@ spec: items: properties: accountName: + default: INSTACLUSTR + description: For customers running in their own account. Your + provider account can be found on the Create Cluster page on + the Instaclustr Console, or the "Provider Account" property + on any existing cluster. For customers provisioning on Instaclustr's + cloud provider accounts, this property may be omitted. type: string - clientToServerEncryption: - type: boolean - cloudProvider: - type: string - cloudProviderSettings: + awsSettings: + description: AWS specific settings for the Data Centre. Cannot + be provided with GCP or Azure settings. items: properties: backupBucket: + description: Specify the S3 bucket to use for storing + backup data for the cluster data centre. Only available + for customers running in their own cloud provider accounts. + Currently supported for OpenSearch clusters only. type: string customVirtualNetworkId: + description: VPC ID into which the Data Centre will be + provisioned. The Data Centre's network allocation must + match the IPv4 CIDR block of the specified VPC. type: string - disableSnapshotAutoExpiry: - type: boolean - diskEncryptionKey: + encryptionKey: + description: ID of a KMS encryption key to encrypt data + on nodes. KMS encryption key must be set in Cluster + Resources through the Instaclustr Console before provisioning + an encrypted Data Centre. + type: string + type: object + maxItems: 1 + type: array + azureSettings: + description: Azure specific settings for the Data Centre. Cannot + be provided with AWS or GCP settings. + items: + properties: + customVirtualNetworkId: + description: VNet ID into which the Data Centre will be + provisioned. The VNet must have an available address + space for the Data Centre's network allocation to be + appended to the VNet. Currently supported for PostgreSQL + clusters only. type: string resourceGroup: + description: The name of the Azure Resource Group into + which the Data Centre will be provisioned. + type: string + storageNetwork: + description: 'The private network address block to be + used for the storage network. This is only used for + certain node sizes, currently limited to those which + use Azure NetApp Files: for all other node sizes, this + field should not be provided. 
The network must have + a prefix length between /16 and /28, and must be part + of a private address range.' type: string type: object + maxItems: 1 type: array + clientToServerEncryption: + type: boolean + cloudProvider: + description: Name of a cloud provider service. + type: string enforceAuthEnabled: type: boolean enforceAuthSchemes: items: type: string type: array + gcpSettings: + description: GCP specific settings for the Data Centre. Cannot + be provided with AWS or Azure settings. + items: + properties: + customVirtualNetworkId: + description: "Network name or a relative Network or Subnetwork + URI. The Data Centre's network allocation must match + the IPv4 CIDR block of the specified subnet. \n Examples: + Network URI: projects/{riyoa-gcp-project-name}/global/networks/{network-name}. + Network name: {network-name}, equivalent to projects/{riyoa-gcp-project-name}/global/networks/{network-name}. + Same-project subnetwork URI: projects/{riyoa-gcp-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}. + Shared VPC subnetwork URI: projects/{riyoa-gcp-host-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}." + type: string + disableSnapshotAutoExpiry: + description: Specify whether the GCS backup bucket should + automatically expire data after 7 days or not. Setting + this to true will disable automatic expiry and will + allow for creation of custom snapshot repositories with + customisable retention using the Index Management Plugin. + The storage will have to be manually cleared after the + cluster is deleted. Only available for customers running + in their own cloud provider accounts. Currently supported + for OpenSearch clusters only. + type: boolean + type: object + maxItems: 1 + type: array name: + description: A logical name for the data centre within a cluster. + These names must be unique in the cluster. type: string network: + description: The private network address block for the Data + Centre specified using CIDR address notation. The network + must have a prefix length between /12 and /22 and must be + part of a private address space. type: string nodeSize: type: string nodesNumber: type: integer region: + description: Region of the Data Centre. type: string tags: additionalProperties: type: string + description: List of tags to apply to the Data Centre. Tags + are metadata labels which allow you to identify, categorize + and filter clusters. This can be useful for grouping together + clusters into applications, environments, or any category + that you require. type: object required: - clientToServerEncryption - cloudProvider + - name - network - nodeSize - nodesNumber @@ -109,13 +195,7 @@ spec: name: description: Name [ 3 .. 32 ] characters. type: string - pciCompliance: - description: The PCI compliance standards relate to the security of - user data and transactional information. Can only be applied clusters - provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch - and Redis. 
- type: boolean - privateNetworkCluster: + privateNetwork: type: boolean slaTier: description: 'Non-production clusters may receive lower priority support @@ -144,15 +224,11 @@ spec: status: description: ZookeeperStatus defines the observed state of Zookeeper properties: - cdcid: - type: string currentClusterOperationStatus: type: string dataCentres: items: properties: - encryptionKeyId: - type: string id: type: string name: @@ -178,21 +254,6 @@ spec: type: string type: object type: array - nodesNumber: - type: integer - privateLink: - items: - properties: - advertisedHostname: - type: string - endPointServiceId: - type: string - endPointServiceName: - type: string - required: - - advertisedHostname - type: object - type: array resizeOperations: items: properties: @@ -249,6 +310,8 @@ spec: type: array status: type: string + required: + - nodes type: object type: array defaultUserSecretRef: @@ -352,19 +415,8 @@ spec: type: array nodeCount: type: string - options: - properties: - dataNodeSize: - type: string - masterNodeSize: - type: string - openSearchDashboardsNodeSize: - type: string - type: object state: type: string - twoFactorDeleteEnabled: - type: boolean type: object type: object served: true diff --git a/config/samples/clusters_v1beta1_cadence.yaml b/config/samples/clusters_v1beta1_cadence.yaml index 73b417dbb..0b1d29485 100644 --- a/config/samples/clusters_v1beta1_cadence.yaml +++ b/config/samples/clusters_v1beta1_cadence.yaml @@ -48,7 +48,7 @@ spec: name: "testdc" # nodeSize: "CAD-PRD-m5ad.large-75" nodeSize: "CAD-DEV-t3.small-5" - numberOfNodes: 2 + nodesNumber: 2 clientEncryption: false # privateLink: # - advertisedHostname: "cadence-sample-test.com" diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml index 57a663df3..338ca2b85 100644 --- a/config/samples/clusters_v1beta1_cassandra.yaml +++ b/config/samples/clusters_v1beta1_cassandra.yaml @@ -4,7 +4,7 @@ metadata: name: cassandra-cluster spec: name: "username-cassandra" #(immutable) - version: "4.1.13" #(immutable) + version: "4.1.3" #(immutable) privateNetwork: false #(immutable) dataCentres: - name: "AWS_cassandra" #(mutable) diff --git a/config/samples/clusters_v1beta1_kafkaconnect.yaml b/config/samples/clusters_v1beta1_kafkaconnect.yaml index 8023ede54..3371052e5 100644 --- a/config/samples/clusters_v1beta1_kafkaconnect.yaml +++ b/config/samples/clusters_v1beta1_kafkaconnect.yaml @@ -6,7 +6,7 @@ spec: name: "example-KC" dataCentres: - name: "US_EAST_1_DC_KAFKA" - numberOfNodes: 3 + nodesNumber: 3 cloudProvider: "AWS_VPC" replicationFactor: 3 tags: diff --git a/config/samples/clusters_v1beta1_postgresql.yaml b/config/samples/clusters_v1beta1_postgresql.yaml index cc2aa792c..a75259523 100644 --- a/config/samples/clusters_v1beta1_postgresql.yaml +++ b/config/samples/clusters_v1beta1_postgresql.yaml @@ -14,7 +14,7 @@ spec: cloudProvider: "AWS_VPC" # nodeSize: "PGS-DEV-t4g.medium-30" nodeSize: "PGS-DEV-t4g.small-5" - numberOfNodes: 2 + nodesNumber: 2 clientEncryption: false name: "testDC1" intraDataCentreReplication: diff --git a/config/samples/clusters_v1beta1_zookeeper.yaml b/config/samples/clusters_v1beta1_zookeeper.yaml index 570d4582e..43cf66ab1 100644 --- a/config/samples/clusters_v1beta1_zookeeper.yaml +++ b/config/samples/clusters_v1beta1_zookeeper.yaml @@ -3,7 +3,7 @@ kind: Zookeeper metadata: name: zookeeper-sample spec: - name: "username-zookeeper" + name: "example-zookeeper" # description: "some description" dataCentres: - clientToServerEncryption: false @@ 
-14,6 +14,6 @@ spec: # nodeSize: "zookeeper-production-m5.large-60" nodesNumber: 3 region: "US_EAST_1" - privateNetworkCluster: false + privateNetwork: false slaTier: "NON_PRODUCTION" version: "3.8.2" diff --git a/controllers/clusterresources/awsencryptionkey_controller.go b/controllers/clusterresources/awsencryptionkey_controller.go index f19f2cd19..631963afd 100644 --- a/controllers/clusterresources/awsencryptionkey_controller.go +++ b/controllers/clusterresources/awsencryptionkey_controller.go @@ -228,7 +228,7 @@ func (r *AWSEncryptionKeyReconciler) handleDelete( ) } - r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.SyncJob)) patch := encryptionKey.NewPatch() controllerutil.RemoveFinalizer(encryptionKey, models.DeletionFinalizer) encryptionKey.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -265,7 +265,7 @@ func (r *AWSEncryptionKeyReconciler) handleDelete( func (r *AWSEncryptionKeyReconciler) startEncryptionKeyStatusJob(encryptionKey *v1beta1.AWSEncryptionKey) error { job := r.newWatchStatusJob(encryptionKey) - err := r.Scheduler.ScheduleJob(encryptionKey.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(encryptionKey.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -286,7 +286,7 @@ func (r *AWSEncryptionKeyReconciler) newWatchStatusJob(encryptionKey *v1beta1.AW "namespaced name", key, ) - r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(encryptionKey.GetJobID(scheduler.SyncJob)) return nil } @@ -333,7 +333,7 @@ func (r *AWSEncryptionKeyReconciler) handleExternalDelete(ctx context.Context, k l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awsendpointserviceprincipal_controller.go b/controllers/clusterresources/awsendpointserviceprincipal_controller.go index 3f5c9b783..e4fc26b8d 100644 --- a/controllers/clusterresources/awsendpointserviceprincipal_controller.go +++ b/controllers/clusterresources/awsendpointserviceprincipal_controller.go @@ -167,7 +167,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleCreate(ctx context.Context return ctrl.Result{}, err } r.EventRecorder.Eventf(principal, models.Normal, models.Created, - "Status check job %s has been started", principal.GetJobID(scheduler.StatusChecker), + "Status check job %s has been started", principal.GetJobID(scheduler.SyncJob), ) return ctrl.Result{}, nil @@ -203,7 +203,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleDelete(ctx context.Context func (r *AWSEndpointServicePrincipalReconciler) startWatchStatusJob(ctx context.Context, resource *clusterresourcesv1beta1.AWSEndpointServicePrincipal) error { job := r.newWatchStatusJob(ctx, resource) - return r.Scheduler.ScheduleJob(resource.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + return r.Scheduler.ScheduleJob(resource.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) } func (r *AWSEndpointServicePrincipalReconciler) newWatchStatusJob(ctx context.Context, principal *clusterresourcesv1beta1.AWSEndpointServicePrincipal) scheduler.Job { @@ -218,7 +218,7 @@ func (r 
*AWSEndpointServicePrincipalReconciler) newWatchStatusJob(ctx context.Co "namespaced name", key, ) - r.Scheduler.RemoveJob(principal.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(principal.GetJobID(scheduler.SyncJob)) return nil } @@ -252,7 +252,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleExternalDelete(ctx context l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(principal, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(principal.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(principal.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go index 0818463f1..f035ca80c 100644 --- a/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go +++ b/controllers/clusterresources/awssecuritygroupfirewallrule_controller.go @@ -256,7 +256,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( ) } - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) firewallRule.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) @@ -292,7 +292,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleDeleteFirewallRule( func (r *AWSSecurityGroupFirewallRuleReconciler) startFirewallRuleStatusJob(firewallRule *v1beta1.AWSSecurityGroupFirewallRule) error { job := r.newWatchStatusJob(firewallRule) - err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -313,7 +313,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) newWatchStatusJob(firewallRule "namespaced name", key, ) - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) return nil } @@ -360,7 +360,7 @@ func (r *AWSSecurityGroupFirewallRuleReconciler) handleExternalDelete(ctx contex l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(rule, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(rule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(rule.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/awsvpcpeering_controller.go b/controllers/clusterresources/awsvpcpeering_controller.go index 9d6d20d5d..e1a7d7466 100644 --- a/controllers/clusterresources/awsvpcpeering_controller.go +++ b/controllers/clusterresources/awsvpcpeering_controller.go @@ -362,7 +362,7 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( return ctrl.Result{}, err } - r.Scheduler.RemoveJob(aws.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(aws.GetJobID(scheduler.SyncJob)) patch := aws.NewPatch() controllerutil.RemoveFinalizer(aws, models.DeletionFinalizer) @@ -398,7 +398,7 @@ func (r *AWSVPCPeeringReconciler) handleDeletePeering( func (r *AWSVPCPeeringReconciler) startAWSVPCPeeringStatusJob(awsPeering *v1beta1.AWSVPCPeering) error { job := r.newWatchStatusJob(awsPeering) - err := r.Scheduler.ScheduleJob(awsPeering.GetJobID(scheduler.StatusChecker), 
scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(awsPeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -418,7 +418,7 @@ func (r *AWSVPCPeeringReconciler) newWatchStatusJob(awsPeering *v1beta1.AWSVPCPe "namespaced name", namespacedName, ) - r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -459,7 +459,7 @@ func (r *AWSVPCPeeringReconciler) newWatchStatusJob(awsPeering *v1beta1.AWSVPCPe "The AWSPeering was deleted on AWS, stopping job...", ) - r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(awsPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -546,7 +546,7 @@ func (r *AWSVPCPeeringReconciler) handleExternalDelete(ctx context.Context, key l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/azurevnetpeering_controller.go b/controllers/clusterresources/azurevnetpeering_controller.go index 233a22675..c92144b19 100644 --- a/controllers/clusterresources/azurevnetpeering_controller.go +++ b/controllers/clusterresources/azurevnetpeering_controller.go @@ -247,7 +247,7 @@ func (r *AzureVNetPeeringReconciler) handleDeletePeering( } if status != nil { - r.Scheduler.RemoveJob(azure.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(azure.GetJobID(scheduler.SyncJob)) err = r.API.DeletePeering(azure.Status.ID, instaclustr.AzurePeeringEndpoint) if err != nil { l.Error(err, "cannot update Azure VNet Peering resource status", @@ -312,7 +312,7 @@ func (r *AzureVNetPeeringReconciler) startAzureVNetPeeringStatusJob(azurePeering ) error { job := r.newWatchStatusJob(azurePeering) - err := r.Scheduler.ScheduleJob(azurePeering.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(azurePeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -334,7 +334,7 @@ func (r *AzureVNetPeeringReconciler) newWatchStatusJob(azureVNetPeering *v1beta1 "namespaced name", key, ) - r.Scheduler.RemoveJob(azureVNetPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(azureVNetPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -382,7 +382,7 @@ func (r *AzureVNetPeeringReconciler) handleExternalDelete(ctx context.Context, k l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusterresources/clusternetworkfirewallrule_controller.go b/controllers/clusterresources/clusternetworkfirewallrule_controller.go index 287cec570..2baca470d 100644 --- a/controllers/clusterresources/clusternetworkfirewallrule_controller.go +++ b/controllers/clusterresources/clusternetworkfirewallrule_controller.go @@ -276,7 +276,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( ) } - r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(firewallRule.GetJobID(scheduler.SyncJob)) 
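// Editor's note (hedged): the scheduler.StatusChecker -> scheduler.SyncJob
// substitution repeated across every controller in this diff is a pure
// job-ID rename; scheduling behaviour and intervals are untouched. The
// scheduler contract these call sites rely on is assumed to look roughly
// like the following sketch (illustrative only; the real definitions live
// in the operator's scheduler package, and the interval type is assumed
// to be time.Duration):
//
//	type Job func() error
//
//	type Interface interface {
//		ScheduleJob(id string, interval time.Duration, job Job) error
//		RemoveJob(id string)
//	}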
controllerutil.RemoveFinalizer(firewallRule, models.DeletionFinalizer) firewallRule.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, firewallRule, patch) @@ -311,7 +311,7 @@ func (r *ClusterNetworkFirewallRuleReconciler) HandleDeleteFirewallRule( func (r *ClusterNetworkFirewallRuleReconciler) startFirewallRuleStatusJob(firewallRule *v1beta1.ClusterNetworkFirewallRule) error { job := r.newWatchStatusJob(firewallRule) - err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(firewallRule.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } diff --git a/controllers/clusterresources/gcpvpcpeering_controller.go b/controllers/clusterresources/gcpvpcpeering_controller.go index 61b5b7eab..0c269a0e1 100644 --- a/controllers/clusterresources/gcpvpcpeering_controller.go +++ b/controllers/clusterresources/gcpvpcpeering_controller.go @@ -257,7 +257,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( } patch := gcp.NewPatch() - r.Scheduler.RemoveJob(gcp.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(gcp.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(gcp, models.DeletionFinalizer) gcp.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, gcp, patch) @@ -295,7 +295,7 @@ func (r *GCPVPCPeeringReconciler) handleDeleteCluster( func (r *GCPVPCPeeringReconciler) startGCPVPCPeeringStatusJob(gcpPeering *v1beta1.GCPVPCPeering) error { job := r.newWatchStatusJob(gcpPeering) - err := r.Scheduler.ScheduleJob(gcpPeering.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(gcpPeering.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -316,7 +316,7 @@ func (r *GCPVPCPeeringReconciler) newWatchStatusJob(gcpPeering *v1beta1.GCPVPCPe "namespaced name", key, ) - r.Scheduler.RemoveJob(gcpPeering.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(gcpPeering.GetJobID(scheduler.SyncJob)) return nil } @@ -364,7 +364,7 @@ func (r *GCPVPCPeeringReconciler) handleExternalDelete(ctx context.Context, key l.Info(instaclustr.MsgInstaclustrResourceNotFound) r.EventRecorder.Eventf(key, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(key.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(key.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index 872513845..15cd446e5 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -247,7 +247,7 @@ func (r *CadenceReconciler) handleCreateCluster( } r.EventRecorder.Event(c, models.Normal, models.Created, - "Cluster status check job is started") + "Cluster sync job is started") } return ctrl.Result{}, nil @@ -424,7 +424,7 @@ func (r *CadenceReconciler) handleDeleteCluster( } } - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) patch := c.NewPatch() controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -689,7 +689,7 @@ func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandra func (r *CadenceReconciler) startSyncJob(c *v1beta1.Cadence) error { job := r.newSyncJob(c) - err := 
r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -698,14 +698,14 @@ func (r *CadenceReconciler) startSyncJob(c *v1beta1.Cadence) error { } func (r *CadenceReconciler) newSyncJob(c *v1beta1.Cadence) scheduler.Job { - l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.StatusChecker), "clusterID", c.Status.ID) + l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.SyncJob), "clusterID", c.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(c) err := r.Get(context.Background(), namespacedName, c) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -1150,7 +1150,7 @@ func (r *CadenceReconciler) handleExternalDelete(ctx context.Context, c *v1beta1 r.EventRecorder.Eventf(c, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index 80db278ae..6f7f02487 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -239,6 +239,11 @@ func (r *CassandraReconciler) createCluster(ctx context.Context, c *v1beta1.Cass return fmt.Errorf("failed to update cassandra status, err: %w", err) } + err = r.createDefaultSecret(ctx, c, l) + if err != nil { + return fmt.Errorf("failed to create default cassandra user secret, err: %w", err) + } + l.Info( "Cluster has been created", "cluster name", c.Spec.Name, @@ -264,7 +269,7 @@ func (r *CassandraReconciler) startClusterJobs(c *v1beta1.Cassandra, l logr.Logg r.EventRecorder.Eventf( c, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) err = r.startClusterBackupsJob(c) @@ -316,21 +321,6 @@ func (r *CassandraReconciler) handleCreateCluster( } } - err := r.createDefaultSecret(ctx, c, l) - if err != nil { - l.Error(err, "Cannot create default secret for Cassandra", - "cluster name", c.Spec.Name, - "clusterID", c.Status.ID, - ) - r.EventRecorder.Eventf( - c, models.Warning, models.CreationFailed, - "Default user secret creation on the Instaclustr is failed. 
Reason: %v", - err, - ) - - return reconcile.Result{}, err - } - if c.Status.State != models.DeletedStatus { patch := c.NewPatch() c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent @@ -532,7 +522,7 @@ func (r *CassandraReconciler) handleDeleteCluster( r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) l.Info("Deleting cluster backup resources", "cluster ID", c.Status.ID) @@ -615,7 +605,7 @@ func (r *CassandraReconciler) handleDeleteCluster( func (r *CassandraReconciler) startSyncJob(c *v1beta1.Cassandra) error { job := r.newSyncJob(c) - err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -658,7 +648,7 @@ func (r *CassandraReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cassandra, } func (r *CassandraReconciler) newSyncJob(c *v1beta1.Cassandra) scheduler.Job { - l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.StatusChecker), "clusterID", c.Status.ID) + l := log.Log.WithValues("syncJob", c.GetJobID(scheduler.SyncJob), "clusterID", c.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(c) @@ -668,7 +658,7 @@ func (r *CassandraReconciler) newSyncJob(c *v1beta1.Cassandra) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } @@ -1083,7 +1073,7 @@ func (r *CassandraReconciler) handleExternalDelete(ctx context.Context, c *v1bet r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/datatest/cadence_v1beta1.yaml b/controllers/clusters/datatest/cadence_v1beta1.yaml index c910f00e3..808c22941 100644 --- a/controllers/clusters/datatest/cadence_v1beta1.yaml +++ b/controllers/clusters/datatest/cadence_v1beta1.yaml @@ -52,7 +52,7 @@ spec: name: "testdc" # nodeSize: "CAD-PRD-m5ad.large-75" nodeSize: "cadence-test-node-size-1" - numberOfNodes: 1 + nodesNumber: 1 clientEncryption: false # privateLink: # - advertisedHostname: "cadence-sample-test.com" diff --git a/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml b/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml index 6cd7aaff9..dd7e1016c 100644 --- a/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml +++ b/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml @@ -52,7 +52,7 @@ spec: name: "testdc" # nodeSize: "CAD-PRD-m5ad.large-75" nodeSize: "cadence-test-node-size-1" - numberOfNodes: 1 + nodesNumber: 1 clientEncryption: false # privateLink: # - advertisedHostname: "cadence-sample-test.com" diff --git a/controllers/clusters/datatest/kafkaconnect_v1beta1.yaml b/controllers/clusters/datatest/kafkaconnect_v1beta1.yaml index 72ffd33a3..97419dcc3 100644 --- a/controllers/clusters/datatest/kafkaconnect_v1beta1.yaml +++ b/controllers/clusters/datatest/kafkaconnect_v1beta1.yaml @@ -8,7 +8,7 @@ metadata: spec: dataCentres: - name: "US_EAST_1_DC_KAFKA" 
- numberOfNodes: 3 + nodesNumber: 3 # accountName: "TestAccName" cloudProvider: "AWS_VPC" replicationFactor: 3 diff --git a/controllers/clusters/datatest/postgresql_v1beta1.yaml b/controllers/clusters/datatest/postgresql_v1beta1.yaml index 56a5a536a..b96b86dd2 100644 --- a/controllers/clusters/datatest/postgresql_v1beta1.yaml +++ b/controllers/clusters/datatest/postgresql_v1beta1.yaml @@ -13,7 +13,7 @@ spec: network: "10.1.0.0/16" cloudProvider: "AWS_VPC" nodeSize: "PGS-DEV-t4g.small-5" - numberOfNodes: 2 + nodesNumber: 2 accountName: "accountNameTEST" # cloudProviderSettings: # - customVirtualNetworkId: "vpc-12345678" diff --git a/controllers/clusters/datatest/zookeeper_v1beta1.yaml b/controllers/clusters/datatest/zookeeper_v1beta1.yaml index b82202173..c4b0fa3a1 100644 --- a/controllers/clusters/datatest/zookeeper_v1beta1.yaml +++ b/controllers/clusters/datatest/zookeeper_v1beta1.yaml @@ -19,8 +19,8 @@ spec: # - customVirtualNetworkId: "vpc-12345678" # diskEncryptionKey: "123e4567-e89b-12d3-a456-426614174000" # resourceGroup: "asdfadfsdfas" - name: "OperatorZookeeper" - privateNetworkCluster: true + name: "bohdan-zookeeper-test" + privateNetwork: true # twoFactorDelete: # - email: "emailTEST" # phone: "phoneTEST" diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go index 2380137c1..a92068507 100644 --- a/controllers/clusters/helpers.go +++ b/controllers/clusters/helpers.go @@ -30,12 +30,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - "k8s.io/utils/strings/slices" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/instaclustr/operator/apis/clusters/v1beta1" "github.com/instaclustr/operator/pkg/models" "github.com/instaclustr/operator/pkg/utils/dcomparison" ) @@ -60,98 +58,6 @@ func convertAPIv2ConfigToMap(instConfigs []*models.ConfigurationProperties) map[ return newConfigs } -func areStatusesEqual(a, b *v1beta1.ClusterStatus) bool { - if a == nil && b == nil { - return true - } - - if a == nil || b == nil || - a.ID != b.ID || - a.State != b.State || - a.CDCID != b.CDCID || - a.TwoFactorDeleteEnabled != b.TwoFactorDeleteEnabled || - a.CurrentClusterOperationStatus != b.CurrentClusterOperationStatus || - !areDataCentresEqual(a.DataCentres, b.DataCentres) || - !areDataCentreOptionsEqual(a.Options, b.Options) || - !b.PrivateLinkStatusesEqual(a) { - return false - } - - return true -} - -func areDataCentreOptionsEqual(a, b *v1beta1.Options) bool { - if a == nil && b == nil { - return true - } - - if a == nil || b == nil { - return false - } - - return *a == *b -} - -func areDataCentresEqual(a, b []*v1beta1.DataCentreStatus) bool { - if a == nil && b == nil { - return true - } - - if len(a) != len(b) { - return false - } - - for i := range a { - if a[i].ID != b[i].ID { - continue - } - - if a[i].Status != b[i].Status || - a[i].NodesNumber != b[i].NodesNumber || - a[i].EncryptionKeyID != b[i].EncryptionKeyID { - return false - } - - if !isDataCentreNodesEqual(a[i].Nodes, b[i].Nodes) { - return false - } - } - - return true -} - -func isDataCentreNodesEqual(a, b []*v1beta1.Node) bool { - if a == nil && b == nil { - return true - } - if len(a) != len(b) { - return false - } - - for i := range a { - var eq bool - for j := range b { - if a[i].ID != b[j].ID { - continue - } - - if a[i].Size != b[j].Size || - a[i].PublicAddress != b[j].PublicAddress || - a[i].PrivateAddress != b[j].PrivateAddress || - 
a[i].Status != b[j].Status || - !slices.Equal(a[i].Roles, b[j].Roles) || - a[i].Rack != b[j].Rack { - return false - } - eq = true - } - if !eq { - return false - } - } - return true -} - func getSortedAppVersions(versions []*models.AppVersions, appType string) []*version.Version { for _, apps := range versions { if apps.Application == appType { diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go index b82b98a0b..037a38108 100644 --- a/controllers/clusters/kafka_controller.go +++ b/controllers/clusters/kafka_controller.go @@ -156,7 +156,7 @@ func (r *KafkaReconciler) startJobs(k *v1beta1.Kafka) error { r.EventRecorder.Eventf( k, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) if k.Spec.UserRefs != nil && k.Status.AvailableUsers == nil { @@ -376,7 +376,7 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Ka return reconcile.Result{}, err } - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) controllerutil.RemoveFinalizer(k, models.DeletionFinalizer) k.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -429,7 +429,7 @@ func (r *KafkaReconciler) startClusterOnPremisesIPsJob(k *v1beta1.Kafka, b *onPr func (r *KafkaReconciler) startSyncJob(kafka *v1beta1.Kafka) error { job := r.newSyncJob(kafka) - err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -438,7 +438,7 @@ func (r *KafkaReconciler) startSyncJob(kafka *v1beta1.Kafka) error { } func (r *KafkaReconciler) newSyncJob(k *v1beta1.Kafka) scheduler.Job { - l := log.Log.WithValues("syncJob", k.GetJobID(scheduler.StatusChecker), "clusterID", k.Status.ID) + l := log.Log.WithValues("syncJob", k.GetJobID(scheduler.SyncJob), "clusterID", k.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(k) @@ -446,7 +446,7 @@ func (r *KafkaReconciler) newSyncJob(k *v1beta1.Kafka) scheduler.Job { if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. 
Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) return nil @@ -655,7 +655,7 @@ func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, k *v1beta1.K r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go index 697a4c721..029b8aea6 100644 --- a/controllers/clusters/kafkaconnect_controller.go +++ b/controllers/clusters/kafkaconnect_controller.go @@ -362,7 +362,7 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1 return reconcile.Result{}, err } - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(kc, models.DeletionFinalizer) kc.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, kc, patch) @@ -467,7 +467,7 @@ func (r *KafkaConnectReconciler) startClusterOnPremisesIPsJob(k *v1beta1.KafkaCo func (r *KafkaConnectReconciler) startSyncJob(kc *v1beta1.KafkaConnect) error { job := r.newSyncJob(kc) - err := r.Scheduler.ScheduleJob(kc.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(kc.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -484,7 +484,7 @@ func (r *KafkaConnectReconciler) newSyncJob(kc *v1beta1.KafkaConnect) scheduler. if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -673,7 +673,7 @@ func (r *KafkaConnectReconciler) handleExternalDelete(ctx context.Context, kc *v r.EventRecorder.Eventf(kc, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) r.Scheduler.RemoveJob(kc.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(kc.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/kafkaconnect_controller_test.go b/controllers/clusters/kafkaconnect_controller_test.go index 9ac0f6d06..81ce88858 100644 --- a/controllers/clusters/kafkaconnect_controller_test.go +++ b/controllers/clusters/kafkaconnect_controller_test.go @@ -90,7 +90,7 @@ var _ = Describe("Kafka Connect Controller", func() { Expect(k8sClient.Get(ctx, kafkaConnectNamespacedName, &kafkaConnect)).Should(Succeed()) patch := kafkaConnect.NewPatch() - kafkaConnect.Spec.DataCentres[0].NumberOfNodes = newKafkaConnectNodeNumbers + kafkaConnect.Spec.DataCentres[0].NodesNumber = newKafkaConnectNodeNumbers Expect(k8sClient.Patch(ctx, &kafkaConnect, patch)).Should(Succeed()) By("sending a resize request to the Instaclustr API. 
And when the resize is completed, " + diff --git a/controllers/clusters/on_premises.go b/controllers/clusters/on_premises.go index 22d4bbd72..3aec00455 100644 --- a/controllers/clusters/on_premises.go +++ b/controllers/clusters/on_premises.go @@ -45,7 +45,9 @@ type onPremisesBootstrap struct { K8sClient client.Client K8sObject client.Object EventRecorder record.EventRecorder - ClusterStatus v1beta1.ClusterStatus + ClusterID string + DataCentreID string + Nodes []*v1beta1.Node OnPremisesSpec *v1beta1.OnPremisesSpec ExposePorts []k8scorev1.ServicePort HeadlessPorts []k8scorev1.ServicePort @@ -57,7 +59,9 @@ func newOnPremisesBootstrap( k8sClient client.Client, o client.Object, e record.EventRecorder, - status v1beta1.ClusterStatus, + clusterID string, + dataCentreID string, + nodes []*v1beta1.Node, onPremisesSpec *v1beta1.OnPremisesSpec, exposePorts, headlessPorts []k8scorev1.ServicePort, @@ -67,7 +71,9 @@ func newOnPremisesBootstrap( K8sClient: k8sClient, K8sObject: o, EventRecorder: e, - ClusterStatus: status, + ClusterID: clusterID, + DataCentreID: dataCentreID, + Nodes: nodes, OnPremisesSpec: onPremisesSpec, ExposePorts: exposePorts, HeadlessPorts: headlessPorts, @@ -77,7 +83,7 @@ func newOnPremisesBootstrap( //nolint:unused,deadcode func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBootstrap) error { - if len(b.ClusterStatus.DataCentres) < 1 { + if b.DataCentreID == "" { return fmt.Errorf("datacenter ID is empty") } @@ -108,7 +114,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e ctx, b, gatewayDVName, - b.ClusterStatus.DataCentres[0].ID, + b.DataCentreID, gatewayDVSize, true, ) @@ -139,7 +145,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e ctx, b, gatewayName, - b.ClusterStatus.DataCentres[0].ID, + b.DataCentreID, models.GatewayRack, gatewayDV.Name, gatewayCPU, @@ -168,7 +174,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e b, gatewaySvcName, gatewayName, - b.ClusterStatus.DataCentres[0].ID, + b.DataCentreID, ) err = b.K8sClient.Create(ctx, gatewayExposeService) if err != nil { @@ -190,7 +196,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e b, clusterIPServiceName, gatewayName, - b.ClusterStatus.DataCentres[0].ID, + b.DataCentreID, ) err = b.K8sClient.Create(ctx, nodeExposeService) if err != nil { @@ -203,7 +209,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e //nolint:unused,deadcode func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error { - for i, node := range b.ClusterStatus.DataCentres[0].Nodes { + for i, node := range b.Nodes { nodeOSDiskSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) if err != nil { return err @@ -408,7 +414,7 @@ func newDataDiskDV( Name: name, Namespace: b.K8sObject.GetNamespace(), Labels: map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeIDLabel: nodeID, }, Finalizers: []string{models.DeletionFinalizer}, @@ -455,7 +461,7 @@ func newVM( } labelSet := map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeIDLabel: nodeID, models.NodeRackLabel: nodeRack, models.KubevirtDomainLabel: vmName, @@ -602,7 +608,7 @@ func newExposeService( Name: svcName, Namespace: b.K8sObject.GetNamespace(), Labels: map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, 
models.NodeIDLabel: nodeID, }, Finalizers: []string{models.DeletionFinalizer}, @@ -632,7 +638,7 @@ func newHeadlessService( Name: svcName, Namespace: b.K8sObject.GetNamespace(), Labels: map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, }, Finalizers: []string{models.DeletionFinalizer}, }, @@ -640,7 +646,7 @@ func newHeadlessService( ClusterIP: k8scorev1.ClusterIPNone, Ports: b.HeadlessPorts, Selector: map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeLabel: models.WorkerNode, }, }, @@ -778,7 +784,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job allNodePods := &k8scorev1.PodList{} err := b.K8sClient.List(context.Background(), allNodePods, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeLabel: models.WorkerNode, }), Namespace: b.K8sObject.GetNamespace(), @@ -786,7 +792,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job if err != nil { l.Error(err, "Cannot get on-premises cluster pods", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, ) b.EventRecorder.Eventf( @@ -797,11 +803,11 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job return err } - if len(allNodePods.Items) != len(b.ClusterStatus.DataCentres[0].Nodes) { + if len(allNodePods.Items) != len(b.Nodes) { err = fmt.Errorf("the quantity of pods does not match the number of on-premises nodes") l.Error(err, "Cannot compare private IPs for the cluster", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, ) b.EventRecorder.Eventf( @@ -812,11 +818,11 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job return err } - for _, node := range b.ClusterStatus.DataCentres[0].Nodes { + for _, node := range b.Nodes { nodePods := &k8scorev1.PodList{} err = b.K8sClient.List(context.Background(), nodePods, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeIDLabel: node.ID, }), Namespace: b.K8sObject.GetNamespace(), @@ -824,7 +830,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job if err != nil { l.Error(err, "Cannot get on-premises cluster pods", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, ) b.EventRecorder.Eventf( @@ -842,7 +848,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job err = fmt.Errorf("private IPs was changed") l.Error(err, "Node's private IP addresses are not equal", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, "nodeID", node.ID, "nodeIP", node.PrivateAddress, "podIP", pod.Status.PodIP, @@ -861,7 +867,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job nodeSVCs := &k8scorev1.ServiceList{} err = b.K8sClient.List(context.Background(), nodeSVCs, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeIDLabel: node.ID, }), Namespace: b.K8sObject.GetNamespace(), @@ -869,7 +875,7 @@ func newWatchOnPremisesIPsJob(kind string, b 
*onPremisesBootstrap) scheduler.Job if err != nil { l.Error(err, "Cannot get services backed by on-premises cluster pods", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, ) b.EventRecorder.Eventf( @@ -887,7 +893,7 @@ func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job err = fmt.Errorf("public IPs was changed") l.Error(err, "Node's public IP addresses are not equal", "cluster name", b.K8sObject.GetName(), - "clusterID", b.ClusterStatus.ID, + "clusterID", b.ClusterID, "nodeID", node.ID, "nodeIP", node.PrivateAddress, "svcIP", svc.Status.LoadBalancer.Ingress[0].IP, @@ -924,7 +930,7 @@ func newClusterIPService( Name: svcName, Namespace: b.K8sObject.GetNamespace(), Labels: map[string]string{ - models.ClusterIDLabel: b.ClusterStatus.ID, + models.ClusterIDLabel: b.ClusterID, models.NodeIDLabel: nodeID, }, Finalizers: []string{models.DeletionFinalizer}, diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go index 696294cb9..c7fa5a41e 100644 --- a/controllers/clusters/opensearch_controller.go +++ b/controllers/clusters/opensearch_controller.go @@ -448,7 +448,7 @@ func (r *OpenSearchReconciler) HandleDeleteCluster( r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) logger.Info("Deleting cluster backup resources", "cluster ID", o.Status.ID, @@ -518,7 +518,7 @@ func (r *OpenSearchReconciler) HandleDeleteCluster( func (r *OpenSearchReconciler) startClusterSyncJob(cluster *v1beta1.OpenSearch) error { job := r.newSyncJob(cluster) - err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -549,7 +549,7 @@ func (r *OpenSearchReconciler) startUsersCreationJob(cluster *v1beta1.OpenSearch } func (r *OpenSearchReconciler) newSyncJob(o *v1beta1.OpenSearch) scheduler.Job { - l := log.Log.WithValues("syncJob", o.GetJobID(scheduler.StatusChecker), "clusterID", o.Status.ID) + l := log.Log.WithValues("syncJob", o.GetJobID(scheduler.SyncJob), "clusterID", o.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(o) @@ -559,7 +559,7 @@ func (r *OpenSearchReconciler) newSyncJob(o *v1beta1.OpenSearch) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -993,7 +993,7 @@ func (r *OpenSearchReconciler) handleExternalDelete(ctx context.Context, o *v1be r.Scheduler.RemoveJob(o.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(o.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(o.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(o.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index c972ba737..43125caeb 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -266,7 +266,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( r.EventRecorder.Eventf( pg, 
models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) if pg.Spec.DataCentres[0].CloudProvider == models.ONPREMISES { @@ -531,7 +531,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( ) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(pg, models.DeletionFinalizer) pg.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent @@ -698,7 +698,7 @@ func (r *PostgreSQLReconciler) startClusterOnPremisesIPsJob(pg *v1beta1.PostgreS func (r *PostgreSQLReconciler) startClusterStatusJob(pg *v1beta1.PostgreSQL) error { job := r.newWatchStatusJob(pg) - err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -718,7 +718,7 @@ func (r *PostgreSQLReconciler) startClusterBackupsJob(pg *v1beta1.PostgreSQL) er } func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) scheduler.Job { - l := log.Log.WithValues("syncJob", pg.GetJobID(scheduler.StatusChecker), "clusterID", pg.Status.ID) + l := log.Log.WithValues("syncJob", pg.GetJobID(scheduler.SyncJob), "clusterID", pg.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(pg) @@ -727,7 +727,7 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.UserCreator)) return nil } @@ -1348,7 +1348,7 @@ func (r *PostgreSQLReconciler) handleExternalDelete(ctx context.Context, pg *v1b r.Scheduler.RemoveJob(pg.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(pg.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go index 89eda25db..68b451d8c 100644 --- a/controllers/clusters/redis_controller.go +++ b/controllers/clusters/redis_controller.go @@ -473,7 +473,7 @@ func (r *RedisReconciler) handleDeleteCluster( } } - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) l.Info("Deleting cluster backup resources", @@ -567,7 +567,7 @@ func (r *RedisReconciler) startClusterOnPremisesIPsJob(redis *v1beta1.Redis, b * func (r *RedisReconciler) startSyncJob(cluster *v1beta1.Redis) error { job := r.newSyncJob(cluster) - err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(cluster.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -632,7 +632,7 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo } func (r *RedisReconciler) newSyncJob(redis *v1beta1.Redis) scheduler.Job { - l := log.Log.WithValues("syncJob", redis.GetJobID(scheduler.StatusChecker), "clusterID", 
redis.Status.ID) + l := log.Log.WithValues("syncJob", redis.GetJobID(scheduler.SyncJob), "clusterID", redis.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(redis) @@ -642,7 +642,7 @@ func (r *RedisReconciler) newSyncJob(redis *v1beta1.Redis) scheduler.Job { "namespaced name", namespacedName) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.UserCreator)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) return nil } @@ -1022,7 +1022,7 @@ func (r *RedisReconciler) handleExternalDelete(ctx context.Context, redis *v1bet r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.SyncJob)) return nil } diff --git a/controllers/clusters/zookeeper_controller.go b/controllers/clusters/zookeeper_controller.go index 369b5f142..c64d0bb14 100644 --- a/controllers/clusters/zookeeper_controller.go +++ b/controllers/clusters/zookeeper_controller.go @@ -18,7 +18,9 @@ package clusters import ( "context" + "encoding/json" "errors" + "fmt" "github.com/go-logr/logr" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -101,6 +103,52 @@ func (r *ZookeeperReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( } } +func (r *ZookeeperReconciler) createCluster(ctx context.Context, zook *v1beta1.Zookeeper, l logr.Logger) error { + l.Info("Creating zookeeper cluster", + "cluster name", zook.Spec.Name, + "data centres", zook.Spec.DataCentres) + + b, err := r.API.CreateClusterRaw(instaclustr.ZookeeperEndpoint, zook.Spec.ToInstAPI()) + if err != nil { + return fmt.Errorf("failed to create zookeeper cluster, err: %w", err) + } + + var instaModel models.ZookeeperCluster + err = json.Unmarshal(b, &instaModel) + if err != nil { + return fmt.Errorf("failed to unmarshal body to models.ZookeeperCluster, err: %w", err) + } + + patch := zook.NewPatch() + + zook.Spec.FromInstAPI(&instaModel) + zook.Annotations[models.ResourceStateAnnotation] = models.SyncingEvent + err = r.Patch(ctx, zook, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster spec, err: %w", err) + } + + zook.Status.FromInstAPI(&instaModel) + err = r.Status().Patch(ctx, zook, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster status, err: %w", err) + } + + l.Info("Zookeeper cluster has been created", "cluster ID", zook.Status.ID) + r.EventRecorder.Eventf( + zook, models.Normal, models.Created, + "Cluster creation request is sent. Cluster ID: %s", + zook.Status.ID, + ) + + err = r.createDefaultSecret(ctx, zook, l) + if err != nil { + return err + } + + return nil +} + func (r *ZookeeperReconciler) handleCreateCluster( ctx context.Context, zook *v1beta1.Zookeeper, @@ -110,41 +158,17 @@ func (r *ZookeeperReconciler) handleCreateCluster( l = l.WithName("Creation Event") if zook.Status.ID == "" { - l.Info("Creating zookeeper cluster", - "cluster name", zook.Spec.Name, - "data centres", zook.Spec.DataCentres) - - patch := zook.NewPatch() - - zook.Status.ID, err = r.API.CreateCluster(instaclustr.ZookeeperEndpoint, zook.Spec.ToInstAPI()) - if err != nil { - l.Error(err, "Cannot create zookeeper cluster", "spec", zook.Spec) - r.EventRecorder.Eventf( - zook, models.Warning, models.CreationFailed, - "Cluster creation on the Instaclustr is failed. 
Reason: %v", - err, - ) - return reconcile.Result{}, err - } - - r.EventRecorder.Eventf( - zook, models.Normal, models.Created, - "Cluster creation request is sent. Cluster ID: %s", - zook.Status.ID, - ) - - err = r.Status().Patch(ctx, zook, patch) + err := r.createCluster(ctx, zook, l) if err != nil { - l.Error(err, "Cannot patch zookeeper cluster status from the Instaclustr API", - "spec", zook.Spec) - r.EventRecorder.Eventf( - zook, models.Warning, models.PatchFailed, - "Cluster resource status patch is failed. Reason: %v", - err, + r.EventRecorder.Eventf(zook, models.Warning, models.CreationFailed, + "Failed to create Zookeeper cluster. Reason: %v", err, ) return reconcile.Result{}, err } + } + if zook.Status.State != models.DeletedStatus { + patch := zook.NewPatch() zook.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent controllerutil.AddFinalizer(zook, models.DeletionFinalizer) err = r.Patch(ctx, zook, patch) @@ -158,26 +182,7 @@ func (r *ZookeeperReconciler) handleCreateCluster( return reconcile.Result{}, err } - l.Info("Zookeeper cluster has been created", "cluster ID", zook.Status.ID) - - err = r.createDefaultSecret(ctx, zook, l) - if err != nil { - l.Error(err, "Cannot create default secret for Zookeeper cluster", - "cluster name", zook.Spec.Name, - "clusterID", zook.Status.ID, - ) - r.EventRecorder.Eventf( - zook, models.Warning, models.CreationFailed, - "Default user secret creation on the Instaclustr is failed. Reason: %v", - err, - ) - - return reconcile.Result{}, err - } - } - - if zook.Status.State != models.DeletedStatus { - err = r.startClusterStatusJob(zook) + err = r.startClusterSyncJob(zook) if err != nil { l.Error(err, "Cannot start cluster status job", "zookeeper cluster ID", zook.Status.ID) @@ -191,7 +196,7 @@ func (r *ZookeeperReconciler) handleCreateCluster( r.EventRecorder.Eventf( zook, models.Normal, models.Created, - "Cluster status check job is started", + "Cluster sync job is started", ) } @@ -253,24 +258,21 @@ func (r *ZookeeperReconciler) handleUpdateCluster( ) (reconcile.Result, error) { l = l.WithName("Update Event") - iData, err := r.API.GetZookeeper(zook.Status.ID) + instaModel, err := r.API.GetZookeeper(zook.Status.ID) if err != nil { l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", zook.Status.ID) return reconcile.Result{}, err } - iZook, err := zook.FromInstAPI(iData) - if err != nil { - l.Error(err, "Cannot convert cluster from the Instaclustr API", "cluster ID", zook.Status.ID) - return reconcile.Result{}, err - } + iZook := &v1beta1.Zookeeper{} + iZook.FromInstAPI(instaModel) if zook.Annotations[models.ExternalChangesAnnotation] == models.True || r.RateLimiter.NumRequeues(req) == rlimiter.DefaultMaxTries { return handleExternalChanges[v1beta1.ZookeeperSpec](r.EventRecorder, r.Client, zook, iZook, l) } - if zook.Spec.ClusterSettingsNeedUpdate(iZook.Spec.Cluster) { + if zook.Spec.ClusterSettingsNeedUpdate(&iZook.Spec.GenericClusterSpec) { l.Info("Updating cluster settings", "instaclustr description", iZook.Spec.Description, "instaclustr two factor delete", iZook.Spec.TwoFactorDelete) @@ -300,7 +302,7 @@ func (r *ZookeeperReconciler) handleDeleteCluster( if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "Cannot get zookeeper cluster", "cluster name", zook.Spec.Name, - "status", zook.Status.ClusterStatus.State) + "status", zook.Status.GenericStatus.State) r.EventRecorder.Eventf( zook, models.Warning, models.FetchFailed, "Cluster resource fetch from the Instaclustr API is failed. 
Reason: %v", @@ -366,7 +368,7 @@ func (r *ZookeeperReconciler) handleDeleteCluster( return reconcile.Result{}, err } - r.Scheduler.RemoveJob(zook.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(zook.GetJobID(scheduler.SyncJob)) controllerutil.RemoveFinalizer(zook, models.DeletionFinalizer) zook.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, zook, patch) @@ -403,10 +405,10 @@ func (r *ZookeeperReconciler) handleDeleteCluster( return models.ExitReconcile, nil } -func (r *ZookeeperReconciler) startClusterStatusJob(Zookeeper *v1beta1.Zookeeper) error { - job := r.newWatchStatusJob(Zookeeper) +func (r *ZookeeperReconciler) startClusterSyncJob(Zookeeper *v1beta1.Zookeeper) error { + job := r.newSyncJob(Zookeeper) - err := r.Scheduler.ScheduleJob(Zookeeper.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(Zookeeper.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -414,15 +416,16 @@ func (r *ZookeeperReconciler) startClusterStatusJob(Zookeeper *v1beta1.Zookeeper return nil } -func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) scheduler.Job { - l := log.Log.WithValues("component", "ZookeeperStatusClusterJob") +func (r *ZookeeperReconciler) newSyncJob(zook *v1beta1.Zookeeper) scheduler.Job { + l := log.Log.WithValues("syncJob", zook.GetJobID(scheduler.SyncJob), "clusterID", zook.Status.ID) + return func() error { namespacedName := client.ObjectKeyFromObject(zook) err := r.Get(context.Background(), namespacedName, zook) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(zook.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(zook.GetJobID(scheduler.SyncJob)) return nil } if err != nil { @@ -431,7 +434,7 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul return err } - iData, err := r.API.GetZookeeper(zook.Status.ID) + instaModel, err := r.API.GetZookeeper(zook.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { if zook.DeletionTimestamp != nil { @@ -447,21 +450,16 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul return err } - iZook, err := zook.FromInstAPI(iData) - if err != nil { - l.Error(err, "Cannot convert cluster from the Instaclustr API", "cluster ID", zook.Status.ID) - return err - } + iZook := &v1beta1.Zookeeper{} + iZook.FromInstAPI(instaModel) - if !areStatusesEqual(&zook.Status.ClusterStatus, &iZook.Status.ClusterStatus) { - l.Info("Updating Zookeeper status", - "instaclustr status", iZook.Status, - "status", zook.Status) + if !zook.Status.Equals(&iZook.Status) { + l.Info("Updating Zookeeper status") - areDCsEqual := areDataCentresEqual(iZook.Status.ClusterStatus.DataCentres, zook.Status.ClusterStatus.DataCentres) + areDCsEqual := zook.Status.DCsEquals(iZook.Status.DataCentres) patch := zook.NewPatch() - zook.Status.ClusterStatus = iZook.Status.ClusterStatus + zook.Status.FromInstAPI(instaModel) err = r.Status().Patch(context.Background(), zook, patch) if err != nil { l.Error(err, "Cannot patch Zookeeper cluster", @@ -473,14 +471,14 @@ func (r *ZookeeperReconciler) newWatchStatusJob(zook *v1beta1.Zookeeper) schedul if !areDCsEqual { var nodes []*v1beta1.Node - for _, dc := range iZook.Status.ClusterStatus.DataCentres { + for _, dc := range iZook.Status.DataCentres { nodes = append(nodes, dc.Nodes...) 
diff --git a/controllers/kafkamanagement/mirror_controller.go b/controllers/kafkamanagement/mirror_controller.go
index 4b5677de0..752d8124b 100644
--- a/controllers/kafkamanagement/mirror_controller.go
+++ b/controllers/kafkamanagement/mirror_controller.go
@@ -231,7 +231,7 @@ func (r *MirrorReconciler) handleDeleteMirror(
 	}
 
 	patch := mirror.NewPatch()
-	r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.StatusChecker))
+	r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.SyncJob))
 	mirror.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent
 	controllerutil.RemoveFinalizer(mirror, models.DeletionFinalizer)
 	err = r.Patch(ctx, mirror, patch)
@@ -256,7 +256,7 @@ func (r *MirrorReconciler) handleDeleteMirror(
 func (r *MirrorReconciler) startClusterStatusJob(mirror *v1beta1.Mirror) error {
 	job := r.newWatchStatusJob(mirror)
 
-	err := r.Scheduler.ScheduleJob(mirror.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job)
+	err := r.Scheduler.ScheduleJob(mirror.GetJobID(scheduler.SyncJob), scheduler.ClusterStatusInterval, job)
 	if err != nil {
 		return err
 	}
@@ -275,7 +275,7 @@ func (r *MirrorReconciler) newWatchStatusJob(mirror *v1beta1.Mirror) scheduler.J
 			r.EventRecorder.Eventf(mirror, models.Normal, models.Deleted,
 				"Mirror is not found in the k8s cluster. Closing Instaclustr status sync.")
-			r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.StatusChecker))
+			r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.SyncJob))
 			return nil
 		}
 		if err != nil {
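The mirror controller, like the cluster controllers above, reports lifecycle transitions through r.EventRecorder.Eventf. client-go's FakeRecorder is the usual way to observe those events in tests; a minimal sketch (the Pod is just a stand-in object — the controllers record against the Mirror/cluster resources):

	package main

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
		"k8s.io/client-go/tools/record"
	)

	func main() {
		rec := record.NewFakeRecorder(10) // buffered channel of rendered events

		obj := &corev1.Pod{}
		rec.Eventf(obj, corev1.EventTypeNormal, "Deleted",
			"Mirror is not found in the k8s cluster. Closing Instaclustr status sync.")

		// FakeRecorder renders each event as a "<type> <reason> <message>" string.
		fmt.Println(<-rec.Events)
	}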
Closing Instaclustr status sync.") - r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(mirror.GetJobID(scheduler.SyncJob)) return nil } if err != nil { diff --git a/pkg/instaclustr/client.go b/pkg/instaclustr/client.go index da6611529..8ad362ee0 100644 --- a/pkg/instaclustr/client.go +++ b/pkg/instaclustr/client.go @@ -504,7 +504,7 @@ func (c *Client) UpdateKafkaConnect(id string, kc models.KafkaConnectAPIUpdate) return nil } -func (c *Client) GetZookeeper(id string) ([]byte, error) { +func (c *Client) GetZookeeper(id string) (*models.ZookeeperCluster, error) { url := c.serverHostname + ZookeeperEndpoint + id resp, err := c.DoRequest(url, http.MethodGet, nil) @@ -526,7 +526,13 @@ func (c *Client) GetZookeeper(id string) ([]byte, error) { return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) } - return body, nil + var cluster models.ZookeeperCluster + err = json.Unmarshal(body, &cluster) + if err != nil { + return nil, err + } + + return &cluster, nil } func (c *Client) UpdateDescriptionAndTwoFactorDelete(clusterEndpoint, clusterID, description string, twoFactorDelete *v1beta1.TwoFactorDelete) error { diff --git a/pkg/instaclustr/interfaces.go b/pkg/instaclustr/interfaces.go index 146462081..72b679645 100644 --- a/pkg/instaclustr/interfaces.go +++ b/pkg/instaclustr/interfaces.go @@ -84,7 +84,7 @@ type API interface { GetKafka(id string) (*models.KafkaCluster, error) GetKafkaConnect(id string) (*models.KafkaConnectCluster, error) UpdateKafkaConnect(id string, kc models.KafkaConnectAPIUpdate) error - GetZookeeper(id string) ([]byte, error) + GetZookeeper(id string) (*models.ZookeeperCluster, error) RestoreCluster(restoreData any, clusterKind string) (string, error) GetPostgreSQL(id string) (*models.PGCluster, error) UpdatePostgreSQL(id string, r *models.PGClusterUpdate) error diff --git a/pkg/instaclustr/mock/client.go b/pkg/instaclustr/mock/client.go index 99fdebea2..fb6da34f1 100644 --- a/pkg/instaclustr/mock/client.go +++ b/pkg/instaclustr/mock/client.go @@ -314,7 +314,7 @@ func (c *mockClient) GetKafkaConnect(id string) (*models.KafkaConnectCluster, er panic("GetKafkaConnect: is not implemented") } -func (c *mockClient) GetZookeeper(id string) ([]byte, error) { +func (c *mockClient) GetZookeeper(id string) (*models.ZookeeperCluster, error) { panic("GetZookeeper: is not implemented") } diff --git a/pkg/models/redis_apiv2.go b/pkg/models/redis_apiv2.go index 003d88a40..a91481135 100644 --- a/pkg/models/redis_apiv2.go +++ b/pkg/models/redis_apiv2.go @@ -32,8 +32,8 @@ type RedisDataCentre struct { NodeSize string `json:"nodeSize"` MasterNodes int `json:"masterNodes"` - ReplicaNodes int `json:"replicaNodes,omitempty"` - ReplicationFactor int `json:"replicationFactor,omitempty"` + ReplicaNodes int `json:"replicaNodes"` + ReplicationFactor int `json:"replicationFactor"` Nodes []*Node `json:"nodes,omitempty"` PrivateLink []*PrivateLink `json:"privateLink,omitempty"` diff --git a/pkg/models/zookeeper_apiv2.go b/pkg/models/zookeeper_apiv2.go index 1645b6184..17d86293d 100644 --- a/pkg/models/zookeeper_apiv2.go +++ b/pkg/models/zookeeper_apiv2.go @@ -17,21 +17,19 @@ limitations under the License. 
diff --git a/pkg/models/zookeeper_apiv2.go b/pkg/models/zookeeper_apiv2.go
index 1645b6184..17d86293d 100644
--- a/pkg/models/zookeeper_apiv2.go
+++ b/pkg/models/zookeeper_apiv2.go
@@ -17,21 +17,19 @@ limitations under the License.
 
 package models
 
 type ZookeeperCluster struct {
-	ID                            string                 `json:"id,omitempty"`
-	Name                          string                 `json:"name"`
-	ZookeeperVersion              string                 `json:"zookeeperVersion,omitempty"`
-	CurrentClusterOperationStatus string                 `json:"currentClusterOperationStatus,omitempty"`
-	Status                        string                 `json:"status,omitempty"`
-	PrivateNetworkCluster         bool                   `json:"privateNetworkCluster"`
-	SLATier                       string                 `json:"slaTier"`
-	TwoFactorDelete               []*TwoFactorDelete     `json:"twoFactorDelete,omitempty"`
-	DataCentres                   []*ZookeeperDataCentre `json:"dataCentres"`
-	Description                   string                 `json:"description,omitempty"`
+	GenericClusterFields `json:",inline"`
+
+	ZookeeperVersion string                 `json:"zookeeperVersion,omitempty"`
+	DataCentres      []*ZookeeperDataCentre `json:"dataCentres"`
 }
 
 type ZookeeperDataCentre struct {
-	DataCentre `json:",inline"`
+	GenericDataCentreFields `json:",inline"`
+
+	NumberOfNodes int    `json:"numberOfNodes"`
+	NodeSize      string `json:"nodeSize"`
 
 	ClientToServerEncryption bool     `json:"clientToServerEncryption"`
-	EnforceAuthSchemes       []string `json:"enforceAuthSchemes,omitempty"`
 	EnforceAuthEnabled       bool     `json:"enforceAuthEnabled"`
+	EnforceAuthSchemes       []string `json:"enforceAuthSchemes,omitempty"`
+	Nodes                    []*Node  `json:"nodes,omitempty"`
 }
diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go
index 2c0d5cbd2..0f1523667 100644
--- a/pkg/scheduler/scheduler.go
+++ b/pkg/scheduler/scheduler.go
@@ -29,7 +29,7 @@ var ClusterBackupsInterval time.Duration
 var UserCreationInterval time.Duration
 
 const (
-	StatusChecker        = "statusChecker"
+	SyncJob              = "sync"
 	BackupsChecker       = "backupsChecker"
 	UserCreator          = "userCreator"
 	OnPremisesIPsChecker = "onPremisesIPsChecker"
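A note on the `json:",inline"` tags introduced with GenericClusterFields and GenericDataCentreFields: encoding/json has no inline option. Field promotion happens because the field is embedded (anonymous), and the empty name in the tag leaves that default behaviour intact, so the tag effectively documents intent (and matters only to tag-aware tools). A sketch with a stand-in for GenericClusterFields — its real field set is defined elsewhere in the repo, and the values below are invented:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Stand-in for models.GenericClusterFields.
	type GenericClusterFields struct {
		ID   string `json:"id,omitempty"`
		Name string `json:"name"`
	}

	type ZookeeperCluster struct {
		GenericClusterFields `json:",inline"` // embedded: fields are promoted

		ZookeeperVersion string `json:"zookeeperVersion,omitempty"`
	}

	func main() {
		b, _ := json.Marshal(ZookeeperCluster{
			GenericClusterFields: GenericClusterFields{ID: "cluster-123", Name: "zk-test"},
			ZookeeperVersion:     "3.8.4",
		})
		// id and name appear at the top level, not nested under a key:
		fmt.Println(string(b)) // {"id":"cluster-123","name":"zk-test","zookeeperVersion":"3.8.4"}
	}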