diff --git a/.gitignore b/.gitignore index 52aa4cdf..3bbc8285 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,7 @@ *.dll *.so *.dylib -bin +bin/* # Test binary, build with `go test -c` *.test diff --git a/Makefile b/Makefile index d8b222dd..f6199feb 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,8 @@ else GOBIN=$(shell go env GOBIN) endif -# 1. make generator : generate hack/boilerplate.go.txt -# 2. make manfifests: generator crd/base and manager and rbac resources to manifests yaml +# 1. make generator : generate DeepCopy, DeepCopyInto and DeepCopyObject +# 2. make manfifests: generator crd and rbac and webhook resources to manifests yaml # 3. make install: install crd into cluster # 4. make deploy: deploy all resource of manifests.yaml into cluster. # 5. make run: generate fmt vet manifests then go run ./main.go @@ -27,11 +27,11 @@ test: generate fmt vet manifests go test ./... -coverprofile cover.out -v # Build curve-operator binary -curve-operator: generate fmt vet +curve-operator: manifests fmt vet go build -o bin/curve-operator main.go # Run against the configured Kubernetes cluster in ~/.kube/config -run: generate fmt vet +run: curve-operator go run ./main.go # Install CRDs into a cluster diff --git a/api/v1/curvecluster_types.go b/api/v1/curvecluster_types.go index 360f2d5e..dbf2c7f7 100644 --- a/api/v1/curvecluster_types.go +++ b/api/v1/curvecluster_types.go @@ -17,150 +17,49 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const CustomResourceGroup = "curve.opencurve.io" - -// ConditionType represents a resource's status -type ConditionType string - -const ( - // ClusterPhasePending indicates the cluster is running to create. - ClusterPhasePending ConditionType = "Pending" - // ClusterPhaseReady indicates the cluster has been created successfully. 
- ClusterPhaseReady ConditionType = "Ready" //nolint:unused - // ClusterPhaseDeleting indicates the cluster is running to delete. - ClusterPhaseDeleting ConditionType = "Deleting" - // ClusterPhaseError indicates the cluster created failed because of some reason. - ClusterPhaseError ConditionType = "Failed" //nolint:unused - // ClusterPhaseUnknown is unknown phase - ClusterPhaseUnknown ConditionType = "Unknown" //nolint:unused -) - -const ( - // ConditionTypeEtcdReady indicates the etcd is ready - ConditionTypeEtcdReady ConditionType = "EtcdReady" - // ConditionTypeMdsReady indicates the mds is ready - ConditionTypeMdsReady ConditionType = "MdsReady" - // ConditionTypeFormatedReady indicates the formated job is ready - ConditionTypeFormatedReady ConditionType = "formatedReady" - // ConditionTypeChunkServerReady indicates the chunk server is ready - ConditionTypeChunkServerReady ConditionType = "ChunkServerReady" - // ConditionTypeMetaServerReady indicates the meta server is ready - ConditionTypeMetaServerReady ConditionType = "MetaServerReady" - // ConditionTypeSnapShotCloneReady indicates the snapshot clone is ready - ConditionTypeSnapShotCloneReady ConditionType = "SnapShotCloneReady" - // ConditionTypeDeleting indicates it's deleting - ConditionTypeDeleting ConditionType = "Deleting" - // ConditionTypeClusterReady indicates the cluster is ready - ConditionTypeClusterReady ConditionType = "Ready" - // ConditionTypeFailure indicates it's failed - ConditionTypeFailure ConditionType = "Failed" - // ConditionTypeUnknown is unknown condition - ConditionTypeUnknown ConditionType = "Unknown" //nolint:unused -) - -type ConditionStatus string - -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" //nolint:unused - ConditionUnknown ConditionStatus = "Unknown" //nolint:unused -) - -type ConditionReason string - -const ( - ConditionEtcdClusterCreatedReason ConditionReason = "EtcdClusterCreated" - ConditionMdsClusterCreatedReason 
ConditionReason = "MdsClusterCreated" - ConditionFormatingChunkfilePoolReason ConditionReason = "FormatingChunkfilePool" - ConditionFormatChunkfilePoolReason ConditionReason = "FormatedChunkfilePool" - ConditionMetaServerClusterCreatedReason ConditionReason = "MetaServerClusterCreated" - ConditionChunkServerClusterCreatedReason ConditionReason = "ChunkServerClusterCreated" - ConditionSnapShotCloneClusterCreatedReason ConditionReason = "SnapShotCloneClusterCreated" - ConditionClusterCreatedReason ConditionReason = "ClusterCreated" //nolint:unused - ConditionReconcileSucceeded ConditionReason = "ReconcileSucceeded" - ConditionReconcileFailed ConditionReason = "ReconcileFailed" - ConditionDeletingClusterReason ConditionReason = "Deleting" -) - -type ClusterCondition struct { - // Type is the type of condition. - Type ConditionType `json:"type,omitempty"` - // Status is the status of condition - // Can be True, False or Unknown. - Status ConditionStatus `json:"status,omitempty"` - // ObservedGeneration - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // LastTransitionTime specifies last time the condition transitioned - // from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - Reason ConditionReason `json:"reason,omitempty"` - // Message is a human readable message indicating details about last transition. 
- Message string `json:"message,omitempty"` -} - -type ClusterVersion struct { - Image string `json:"image,omitempty"` -} - // CurveClusterSpec defines the desired state of CurveCluster type CurveClusterSpec struct { // +optional CurveVersion CurveVersionSpec `json:"curveVersion,omitempty"` - // +optional Nodes []string `json:"nodes,omitempty"` - // +optional - HostDataDir string `json:"hostDataDir,omitempty"` - + DataDir string `json:"dataDir,omitempty"` // +optional - Etcd EtcdSpec `json:"etcd,omitempty"` - + LogDir string `json:"logDir,omitempty"` // +optional - Mds MdsSpec `json:"mds,omitempty"` - + Copysets *int `json:"copysets,omitempty"` // +optional - SnapShotClone SnapShotCloneSpec `json:"snapShotClone,omitempty"` - + Etcd *EtcdSpec `json:"etcd,omitempty"` // +optional - Storage StorageScopeSpec `json:"storage,omitempty"` - - // Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster - // deletion is not imminent. + Mds *MdsSpec `json:"mds,omitempty"` // +optional - // +nullable - CleanupConfirm string `json:"cleanupConfirm,omitempty"` - + Chunkserver *StorageScopeSpec `json:"chunkserver,omitempty"` // +optional - Monitor MonitorSpec `json:"monitor,omitempty"` + SnapShotClone *SnapShotCloneSpec `json:"snapshotclone,omitempty"` } // CurveClusterStatus defines the observed state of CurveCluster type CurveClusterStatus struct { // Phase is a summary of cluster state. // It can be translated from the last conditiontype - Phase ConditionType `json:"phase,omitempty"` - + Phase ClusterPhase `json:"phase,omitempty"` // Condition contains current service state of cluster such as progressing/Ready/Failure... 
Conditions []ClusterCondition `json:"conditions,omitempty"` - // Message shows summary message of cluster from ClusterState // such as 'Curve Cluster Created successfully' Message string `json:"message,omitempty"` - // CurveVersion shows curve version info on status field - CurveVersion ClusterVersion `json:"curveVersion,omitempty"` + CurveVersion CurveVersionSpec `json:"curveVersion,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="HostDataDir",JSONPath=".spec.hostDataDir",type=string +// +kubebuilder:printcolumn:name="DataDir",JSONPath=".spec.dataDir",type=string +// +kubebuilder:printcolumn:name="LogDir",JSONPath=".spec.logDir",type=string // +kubebuilder:printcolumn:name="Version",JSONPath=".spec.curveVersion.image",type=string // +kubebuilder:printcolumn:name="Phase",JSONPath=".status.phase",type=string @@ -182,149 +81,6 @@ type CurveClusterList struct { Items []CurveCluster `json:"items"` } -// CurveVersionSpec represents the settings for the Curve version -type CurveVersionSpec struct { - // +optional - Image string `json:"image,omitempty"` - - // +kubebuilder:validation:Enum=IfNotPresent;Always;Never;"" - // +optional - ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` -} - -// EtcdSpec is the spec of etcd -type EtcdSpec struct { - // +optional - PeerPort int `json:"peerPort,omitempty"` - - // +optional - ClientPort int `json:"clientPort,omitempty"` - - // +optional - Config map[string]string `json:"config,omitempty"` -} - -// MdsSpec is the spec of mds -type MdsSpec struct { - // +optional - Port int `json:"port,omitempty"` - - // +optional - DummyPort int `json:"dummyPort,omitempty"` - - // +optional - Config map[string]string `json:"config,omitempty"` -} - -// SnapShotCloneSpec is the spec of snapshot clone -type SnapShotCloneSpec struct { - // +optional - Enable bool `json:"enable,omitempty"` - - // +optional - Port int `json:"port,omitempty"` - - // +optional - DummyPort 
int `json:"dummyPort,omitempty"` - - // +optional - ProxyPort int `json:"proxyPort,omitempty"` - - // +optional - S3Config S3ConfigSpec `json:"s3Config,omitempty"` -} - -// S3ConfigSpec is the spec of s3 config -type S3ConfigSpec struct { - AK string `json:"ak,omitempty"` - SK string `json:"sk,omitempty"` - NosAddress string `json:"nosAddress,omitempty"` - SnapShotBucketName string `json:"bucketName,omitempty"` -} - -// StorageScopeSpec is the spec of storage scope -type StorageScopeSpec struct { - // +optional - UseSelectedNodes bool `json:"useSelectedNodes,omitempty"` - - // +optional - Nodes []string `json:"nodes,omitempty"` - - // +optional - Port int `json:"port,omitempty"` - - // +optional - CopySets int `json:"copySets,omitempty"` - - // +optional - Devices []DevicesSpec `json:"devices,omitempty"` - - // +optional - SelectedNodes []SelectedNodesSpec `json:"selectedNodes,omitempty"` -} - -// DevicesSpec represents a disk to use in the cluster -type DevicesSpec struct { - // +optional - Name string `json:"name,omitempty"` - - // +optional - MountPath string `json:"mountPath,omitempty"` - - // +optional - Percentage int `json:"percentage,omitempty"` -} - -type SelectedNodesSpec struct { - Node string `json:"node,omitempty"` - Devices []DevicesSpec `json:"devices,omitempty"` -} - -type MonitorSpec struct { - Enable bool `json:"enable,omitempty"` - // +optional - MonitorHost string `json:"monitorHost,omitempty"` - // +optional - Prometheus PrometheusSpec `json:"prometheus,omitempty"` - // +optional - Grafana GrafanaSpec `json:"grafana,omitempty"` - // +optional - NodeExporter NodeExporterSpec `json:"nodeExporter,omitempty"` -} - -type PrometheusSpec struct { - // +optional - ContainerImage string `json:"containerImage,omitempty"` - // +optional - DataDir string `json:"dataDir,omitempty"` - // +optional - ListenPort int `json:"listenPort,omitempty"` - // +optional - RetentionTime string `json:"retentionTime,omitempty"` - // +optional - RetentionSize string 
`json:"retentionSize,omitempty"` -} - -type GrafanaSpec struct { - // +optional - ContainerImage string `json:"containerImage,omitempty"` - // +optional - DataDir string `json:"dataDir,omitempty"` - // +optional - ListenPort int `json:"listenPort,omitempty"` - // +optional - UserName string `json:"userName,omitempty"` - // +optional - PassWord string `json:"passWord,omitempty"` -} - -type NodeExporterSpec struct { - // +optional - ContainerImage string `json:"containerImage,omitempty"` - // +optional - ListenPort int `json:"listenPort,omitempty"` -} - func init() { SchemeBuilder.Register(&CurveCluster{}, &CurveClusterList{}) } diff --git a/api/v1/curvecluster_webhook.go b/api/v1/curvecluster_webhook.go new file mode 100644 index 00000000..a41ac2c6 --- /dev/null +++ b/api/v1/curvecluster_webhook.go @@ -0,0 +1,88 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var curveclusterlog = logf.Log.WithName("curvecluster-resource") + +func (r *CurveCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
+
+//+kubebuilder:webhook:path=/mutate-operator-curve-io-v1-curvecluster,mutating=true,failurePolicy=fail,groups=operator.curve.io,resources=curveclusters,verbs=create;update,versions=v1,name=mcurvecluster.kb.io
+
+var _ webhook.Defaulter = &CurveCluster{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (r *CurveCluster) Default() {
+	curveclusterlog.Info("defaulting CurveCluster", "CurveCluster", client.ObjectKey{
+		Name:      r.Name,
+		Namespace: r.Namespace,
+	})
+
+	// TODO(user): fill in your defaulting logic.
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-operator-curve-io-v1-curvecluster,mutating=false,failurePolicy=fail,groups=operator.curve.io,resources=curveclusters,versions=v1,name=vcurvecluster.kb.io
+
+var _ webhook.Validator = &CurveCluster{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *CurveCluster) ValidateCreate() error {
+	curveclusterlog.Info("validating creation of CurveCluster", "CurveCluster", client.ObjectKey{
+		Name:      r.Name,
+		Namespace: r.Namespace,
+	})
+
+	// TODO(user): fill in your validation logic upon object creation.
+	return nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *CurveCluster) ValidateUpdate(old runtime.Object) error {
+	curveclusterlog.Info("validating update of CurveCluster", "CurveCluster", client.ObjectKey{
+		Name:      r.Name,
+		Namespace: r.Namespace,
+	})
+
+	// TODO(user): fill in your validation logic upon object update.
+ return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *CurveCluster) ValidateDelete() error { + curvefslog.Info("validating deletion of CurveCluster", "CurveCluster", client.ObjectKey{ + Name: r.Name, + Namespace: r.Namespace, + }) + + // TODO(user): fill in your validation logic upon object deletion. + return nil +} diff --git a/api/v1/curvefs_types.go b/api/v1/curvefs_types.go index 482635ff..89f42b34 100644 --- a/api/v1/curvefs_types.go +++ b/api/v1/curvefs_types.go @@ -24,70 +24,48 @@ import ( type CurvefsSpec struct { // +optional CurveVersion CurveVersionSpec `json:"curveVersion,omitempty"` - // +optional Nodes []string `json:"nodes,omitempty"` - - // +optional - HostDataDir string `json:"hostDataDir,omitempty"` - - // +optional - Etcd EtcdSpec `json:"etcd,omitempty"` - - // +optional - Mds MdsSpec `json:"mds,omitempty"` - - // +optional - MetaServer MetaServerSpec `json:"metaserver,omitempty"` - - // +optional - SnapShotClone SnapShotCloneSpec `json:"snapShotClone,omitempty"` - // +optional - Monitor MonitorSpec `json:"monitor,omitempty"` - - // Indicates user intent when deleting a cluster; blocks orchestration and should not be set if cluster - // deletion is not imminent. 
+ DataDir string `json:"dataDir,omitempty"` // +optional - // +nullable - CleanupConfirm string `json:"cleanupConfirm,omitempty"` -} - -// MdsSpec is the spec of mds -type MetaServerSpec struct { + LogDir string `json:"logDir,omitempty"` // +optional - Port int `json:"port,omitempty"` - + Copysets *int `json:"copysets,omitempty"` // +optional - ExternalPort int `json:"externalPort,omitempty"` - + Etcd *EtcdSpec `json:"etcd,omitempty"` // +optional - CopySets int `json:"copySets,omitempty"` - + Mds *MdsSpec `json:"mds,omitempty"` // +optional - Config map[string]string `json:"config,omitempty"` + MetaServer *MetaServerSpec `json:"metaserver,omitempty"` } // CurvefsStatus defines the observed state of Curvefs type CurvefsStatus struct { // Phase is a summary of cluster state. // It can be translated from the last conditiontype - Phase ConditionType `json:"phase,omitempty"` - + // ClusterPending: The cluster has been accepted by system, but in the process + // ClusterRunning: The cluster is healthy and is running process + // ClusterDeleting: The cluster is in deleting process + // ClusterUnknown: The cluster state is unknown + Phase ClusterPhase `json:"phase,omitempty"` // Condition contains current service state of cluster such as progressing/Ready/Failure... 
Conditions []ClusterCondition `json:"conditions,omitempty"` - // Message shows summary message of cluster from ClusterState // such as 'Curve Cluster Created successfully' Message string `json:"message,omitempty"` - - // CurveVersion shows curve version info on status field - CurveVersion ClusterVersion `json:"curveVersion,omitempty"` + // CurveVersion shows curve version info on status field that judge iff upgrade + CurveVersion CurveVersionSpec `json:"curveVersion,omitempty"` + // LastModContextSet means that need to modify operatrion context + LastModContextSet LastModContextSet `json:"lastModContextSet,omitempty"` + // DataDir and LogDir is to compare and update + StorageDir StorageStatusDir `json:"storageStatusDir,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="HostDataDir",JSONPath=".spec.hostDataDir",type=string +// +kubebuilder:printcolumn:name="DataDir",JSONPath=".spec.dataDir",type=string +// +kubebuilder:printcolumn:name="LogDir",JSONPath=".spec.logDir",type=string // +kubebuilder:printcolumn:name="Version",JSONPath=".spec.curveVersion.image",type=string // +kubebuilder:printcolumn:name="Phase",JSONPath=".status.phase",type=string diff --git a/api/v1/curvefs_webhook.go b/api/v1/curvefs_webhook.go new file mode 100644 index 00000000..02168389 --- /dev/null +++ b/api/v1/curvefs_webhook.go @@ -0,0 +1,88 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var curvefslog = logf.Log.WithName("curvefs-resource") + +func (r *Curvefs) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-operator-curve-io-v1-curvefs,mutating=true,failurePolicy=fail,groups=operator.curve.io,resources=curvefs,verbs=create;update,versions=v1,name=mcurvefs.kb.io + +var _ webhook.Defaulter = &Curvefs{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *Curvefs) Default() { + curvefslog.Info("defaulting Curvefs", "Curvefs", client.ObjectKey{ + Name: r.Name, + Namespace: r.Namespace, + }) + + // TODO(user): fill in your defaulting logic. +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:verbs=create;update;delete,path=/validate-operator-curve-io-v1-curvefs,mutating=false,failurePolicy=fail,groups=operator.curve.io,resources=curvefs,versions=v1,name=vcurvefs.kb.io + +var _ webhook.Validator = &Curvefs{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *Curvefs) ValidateCreate() error { + curvefslog.Info("validating creation of Curvefs", "Curvefs", client.ObjectKey{ + Name: r.Name, + Namespace: r.Namespace, + }) + + // TODO(user): fill in your validation logic upon object creation. 
+	return nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *Curvefs) ValidateUpdate(old runtime.Object) error {
+	curvefslog.Info("validating update of Curvefs", "Curvefs", client.ObjectKey{
+		Name:      r.Name,
+		Namespace: r.Namespace,
+	})
+
+	// TODO(user): fill in your validation logic upon object update.
+	return nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (r *Curvefs) ValidateDelete() error {
+	curvefslog.Info("validating deletion of Curvefs", "Curvefs", client.ObjectKey{
+		Name:      r.Name,
+		Namespace: r.Namespace,
+	})
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil
+}
diff --git a/api/v1/types.go b/api/v1/types.go
new file mode 100644
index 00000000..a5eca5ed
--- /dev/null
+++ b/api/v1/types.go
@@ -0,0 +1,214 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+const (
+	PORT          = "port"
+	PEER_PORT     = "peerPort"
+	CLIENT_PORT   = "clientPort"
+	DUMMY_PORT    = "dummyPort"
+	PROXY_PORT    = "proxyPort"
+	EXTERNAL_PORT = "externalPort"
+
+	INSTANCES = "instances"
+)
+
+type ClusterPhase string
+
+const (
+	// ClusterCreating indicates the cluster is to be created.
+	ClusterCreating ClusterPhase = "Creating"
+	// ClusterRunning indicates the cluster has been created successfully and is running.
+	ClusterRunning ClusterPhase = "Running"
+	// ClusterUpdating indicates the cluster is to update config because of some server config change
+	ClusterUpdating ClusterPhase = "Updating"
+	// ClusterUpgrading indicates the cluster is to upgrade because the 'Image' field of 'CurveVersion' changed
+	ClusterUpgrading ClusterPhase = "Upgrading"
+	// ClusterScaling indicates the cluster is to scale because some server config change for chunkserver/metaserver replicas.
+	ClusterScaling ClusterPhase = "Scaling"
+	// ClusterDeleting indicates the cluster is running to delete.
+ ClusterDeleting ClusterPhase = "Deleting" + // ClusterPhaseUnknown means that for some reason the state of cluster could not be obtained. + ClusterPhaseUnknown ClusterPhase = "Unknown" +) + +// ConditionType represents a resource's status +type ConditionType string + +const ( + // ConditionProgressing represents Progressing state of an object + ConditionProgressing ConditionType = "Progressing" + // ConditionClusterReady indicates the cluster is ready + ConditionClusterReady ConditionType = "Ready" + // ConditionDeleting indicates it's deleting + ConditionDeleting ConditionType = "Deleting" + // ConditionFailure indicates it's failed + ConditionFailure ConditionType = "Failed" +) + +type ConditionStatus string + +const ( + ConditionStatusTrue ConditionStatus = "True" + ConditionStatusFalse ConditionStatus = "False" //nolint:unused + ConditionStatusUnknown ConditionStatus = "Unknown" //nolint:unused +) + +type ConditionReason string + +const ( + ConditionDeletingClusterReason ConditionReason = "Deleting" + ConditionReconcileStarted ConditionReason = "ReconcileStarted" + ConditionReconcileSucceeded ConditionReason = "ReconcileSucceeded" + ConditionReconcileFailed ConditionReason = "ReconcileFailed" +) + +type ClusterCondition struct { + // Type is the type of condition. + Type ConditionType `json:"type,omitempty"` + // Status is the status of condition + // Can be True, False or Unknown. + Status ConditionStatus `json:"status,omitempty"` + // LastTransitionTime specifies last time the condition transitioned + // from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + Reason ConditionReason `json:"reason,omitempty"` + // Message is a human readable message indicating details about last transition. 
+ Message string `json:"message,omitempty"` +} + +// CurveVersionSpec represents the settings for the Curve version +type CurveVersionSpec struct { + // +optional + Image string `json:"image,omitempty"` +} + +// EtcdSpec is the spec of etcd +type EtcdSpec struct { + // +optional + PeerPort *int `json:"peerPort,omitempty"` + // +optional + ClientPort *int `json:"clientPort,omitempty"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// MdsSpec is the spec of mds +type MdsSpec struct { + // +optional + Port *int `json:"port,omitempty"` + // +optional + DummyPort *int `json:"dummyPort,omitempty"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// StorageScopeSpec is the spec of storage scope +type StorageScopeSpec struct { + // +optional + Port *int `json:"port,omitempty"` + // +optional + Instances int `json:"instances,omitempty"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// SnapShotCloneSpec is the spec of snapshot clone +type SnapShotCloneSpec struct { + // +optional + Enable bool `json:"enable,omitempty"` + // +optional + Port *int `json:"port,omitempty"` + // +optional + DummyPort *int `json:"dummyPort,omitempty"` + // +optional + ProxyPort *int `json:"proxyPort,omitempty"` + // +optional + S3Config S3ConfigSpec `json:"s3,omitempty"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// S3ConfigSpec is the spec of s3 config +type S3ConfigSpec struct { + AK string `json:"ak,omitempty"` + SK string `json:"sk,omitempty"` + NosAddress string `json:"nosAddress,omitempty"` + SnapShotBucketName string `json:"bucketName,omitempty"` +} + +// MdsSpec is the spec of mds +type MetaServerSpec struct { + // +optional + Port *int `json:"port,omitempty"` + // +optional + ExternalPort *int `json:"externalPort,omitempty"` + // +optional + Instances int `json:"instances,omitempty"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +type MonitorSpec struct 
{ + Enable bool `json:"enable,omitempty"` + // +optional + MonitorHost string `json:"monitorHost,omitempty"` + // +optional + Prometheus PrometheusSpec `json:"prometheus,omitempty"` + // +optional + Grafana GrafanaSpec `json:"grafana,omitempty"` + // +optional + NodeExporter NodeExporterSpec `json:"nodeExporter,omitempty"` +} + +type PrometheusSpec struct { + // +optional + ContainerImage string `json:"containerImage,omitempty"` + // +optional + DataDir string `json:"dataDir,omitempty"` + // +optional + ListenPort int `json:"listenPort,omitempty"` + // +optional + RetentionTime string `json:"retentionTime,omitempty"` + // +optional + RetentionSize string `json:"retentionSize,omitempty"` +} + +type GrafanaSpec struct { + // +optional + ContainerImage string `json:"containerImage,omitempty"` + // +optional + DataDir string `json:"dataDir,omitempty"` + // +optional + ListenPort int `json:"listenPort,omitempty"` + // +optional + UserName string `json:"userName,omitempty"` + // +optional + PassWord string `json:"passWord,omitempty"` +} + +type NodeExporterSpec struct { + // +optional + ContainerImage string `json:"containerImage,omitempty"` + // +optional + ListenPort int `json:"listenPort,omitempty"` +} + +type StorageStatusDir struct { + // DataDir record the cluster data storage directory + DataDir string `json:"dataDir,omitempty"` + // LogDir record the cluster log storage directory + LogDir string `json:"logDir,omitempty"` +} + +type ModContext struct { + // Role represents the service role of modification + Role string `json:"role,omitempty"` + // Parameter represents the parameters of modification + Parameters map[string]string `json:"parameters,omitempty"` +} + +type LastModContextSet struct { + ModContextSet []ModContext `json:"modContextSet,omitempty"` +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index e1c487df..af780798 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ 
limitations under the License. package v1 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -41,21 +41,6 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. -func (in *ClusterVersion) DeepCopy() *ClusterVersion { - if in == nil { - return nil - } - out := new(ClusterVersion) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CurveCluster) DeepCopyInto(out *CurveCluster) { *out = *in @@ -124,11 +109,31 @@ func (in *CurveClusterSpec) DeepCopyInto(out *CurveClusterSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - in.Etcd.DeepCopyInto(&out.Etcd) - in.Mds.DeepCopyInto(&out.Mds) - out.SnapShotClone = in.SnapShotClone - in.Storage.DeepCopyInto(&out.Storage) - out.Monitor = in.Monitor + if in.Copysets != nil { + in, out := &in.Copysets, &out.Copysets + *out = new(int) + **out = **in + } + if in.Etcd != nil { + in, out := &in.Etcd, &out.Etcd + *out = new(EtcdSpec) + (*in).DeepCopyInto(*out) + } + if in.Mds != nil { + in, out := &in.Mds, &out.Mds + *out = new(MdsSpec) + (*in).DeepCopyInto(*out) + } + if in.Chunkserver != nil { + in, out := &in.Chunkserver, &out.Chunkserver + *out = new(StorageScopeSpec) + (*in).DeepCopyInto(*out) + } + if in.SnapShotClone != nil { + in, out := &in.SnapShotClone, &out.SnapShotClone + *out = new(SnapShotCloneSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new 
CurveClusterSpec. @@ -247,11 +252,26 @@ func (in *CurvefsSpec) DeepCopyInto(out *CurvefsSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - in.Etcd.DeepCopyInto(&out.Etcd) - in.Mds.DeepCopyInto(&out.Mds) - in.MetaServer.DeepCopyInto(&out.MetaServer) - out.SnapShotClone = in.SnapShotClone - out.Monitor = in.Monitor + if in.Copysets != nil { + in, out := &in.Copysets, &out.Copysets + *out = new(int) + **out = **in + } + if in.Etcd != nil { + in, out := &in.Etcd, &out.Etcd + *out = new(EtcdSpec) + (*in).DeepCopyInto(*out) + } + if in.Mds != nil { + in, out := &in.Mds, &out.Mds + *out = new(MdsSpec) + (*in).DeepCopyInto(*out) + } + if in.MetaServer != nil { + in, out := &in.MetaServer, &out.MetaServer + *out = new(MetaServerSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurvefsSpec. @@ -275,6 +295,8 @@ func (in *CurvefsStatus) DeepCopyInto(out *CurvefsStatus) { } } out.CurveVersion = in.CurveVersion + in.LastModContextSet.DeepCopyInto(&out.LastModContextSet) + out.StorageDir = in.StorageDir } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurvefsStatus. @@ -287,24 +309,19 @@ func (in *CurvefsStatus) DeepCopy() *CurvefsStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DevicesSpec) DeepCopyInto(out *DevicesSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevicesSpec. -func (in *DevicesSpec) DeepCopy() *DevicesSpec { - if in == nil { - return nil - } - out := new(DevicesSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EtcdSpec) DeepCopyInto(out *EtcdSpec) { *out = *in + if in.PeerPort != nil { + in, out := &in.PeerPort, &out.PeerPort + *out = new(int) + **out = **in + } + if in.ClientPort != nil { + in, out := &in.ClientPort, &out.ClientPort + *out = new(int) + **out = **in + } if in.Config != nil { in, out := &in.Config, &out.Config *out = make(map[string]string, len(*in)) @@ -339,9 +356,41 @@ func (in *GrafanaSpec) DeepCopy() *GrafanaSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastModContextSet) DeepCopyInto(out *LastModContextSet) { + *out = *in + if in.ModContextSet != nil { + in, out := &in.ModContextSet, &out.ModContextSet + *out = make([]ModContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastModContextSet. +func (in *LastModContextSet) DeepCopy() *LastModContextSet { + if in == nil { + return nil + } + out := new(LastModContextSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MdsSpec) DeepCopyInto(out *MdsSpec) { *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.DummyPort != nil { + in, out := &in.DummyPort, &out.DummyPort + *out = new(int) + **out = **in + } if in.Config != nil { in, out := &in.Config, &out.Config *out = make(map[string]string, len(*in)) @@ -364,6 +413,16 @@ func (in *MdsSpec) DeepCopy() *MdsSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MetaServerSpec) DeepCopyInto(out *MetaServerSpec) { *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in + } + if in.ExternalPort != nil { + in, out := &in.ExternalPort, &out.ExternalPort + *out = new(int) + **out = **in + } if in.Config != nil { in, out := &in.Config, &out.Config *out = make(map[string]string, len(*in)) @@ -383,6 +442,28 @@ func (in *MetaServerSpec) DeepCopy() *MetaServerSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModContext) DeepCopyInto(out *ModContext) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModContext. +func (in *ModContext) DeepCopy() *ModContext { + if in == nil { + return nil + } + out := new(ModContext) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MonitorSpec) DeepCopyInto(out *MonitorSpec) { *out = *in @@ -447,29 +528,31 @@ func (in *S3ConfigSpec) DeepCopy() *S3ConfigSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelectedNodesSpec) DeepCopyInto(out *SelectedNodesSpec) { +func (in *SnapShotCloneSpec) DeepCopyInto(out *SnapShotCloneSpec) { *out = *in - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]DevicesSpec, len(*in)) - copy(*out, *in) + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelectedNodesSpec. 
-func (in *SelectedNodesSpec) DeepCopy() *SelectedNodesSpec { - if in == nil { - return nil + if in.DummyPort != nil { + in, out := &in.DummyPort, &out.DummyPort + *out = new(int) + **out = **in + } + if in.ProxyPort != nil { + in, out := &in.ProxyPort, &out.ProxyPort + *out = new(int) + **out = **in } - out := new(SelectedNodesSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SnapShotCloneSpec) DeepCopyInto(out *SnapShotCloneSpec) { - *out = *in out.S3Config = in.S3Config + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapShotCloneSpec. @@ -485,21 +568,16 @@ func (in *SnapShotCloneSpec) DeepCopy() *SnapShotCloneSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageScopeSpec) DeepCopyInto(out *StorageScopeSpec) { *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]DevicesSpec, len(*in)) - copy(*out, *in) + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int) + **out = **in } - if in.SelectedNodes != nil { - in, out := &in.SelectedNodes, &out.SelectedNodes - *out = make([]SelectedNodesSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val } } } @@ -513,3 +591,18 @@ func (in *StorageScopeSpec) DeepCopy() *StorageScopeSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStatusDir) DeepCopyInto(out *StorageStatusDir) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStatusDir. 
+func (in *StorageStatusDir) DeepCopy() *StorageStatusDir { + if in == nil { + return nil + } + out := new(StorageStatusDir) + in.DeepCopyInto(out) + return out +} diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml index 5e9ad7a1..63839972 100644 --- a/config/certmanager/certificate.yaml +++ b/config/certmanager/certificate.yaml @@ -6,7 +6,7 @@ apiVersion: cert-manager.io/v1alpha2 kind: Issuer metadata: name: selfsigned-issuer - namespace: curvebs + namespace: curve spec: selfSigned: {} --- @@ -14,7 +14,7 @@ apiVersion: cert-manager.io/v1alpha2 kind: Certificate metadata: name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml - namespace: curvebs + namespace: curve spec: # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize dnsNames: diff --git a/config/crd/bases/operator.curve.io_curveclusters.yaml b/config/crd/bases/operator.curve.io_curveclusters.yaml index ad1b94d8..a9ec7f23 100644 --- a/config/crd/bases/operator.curve.io_curveclusters.yaml +++ b/config/crd/bases/operator.curve.io_curveclusters.yaml @@ -9,8 +9,11 @@ metadata: name: curveclusters.operator.curve.io spec: additionalPrinterColumns: - - JSONPath: .spec.hostDataDir - name: HostDataDir + - JSONPath: .spec.dataDir + name: DataDir + type: string + - JSONPath: .spec.logDir + name: LogDir type: string - JSONPath: .spec.curveVersion.image name: Version @@ -46,27 +49,29 @@ spec: spec: description: CurveClusterSpec defines the desired state of CurveCluster properties: - cleanupConfirm: - description: Indicates user intent when deleting a cluster; blocks orchestration - and should not be set if cluster deletion is not imminent. 
- nullable: true - type: string + chunkserver: + description: StorageScopeSpec is the spec of storage scope + properties: + config: + additionalProperties: + type: string + type: object + instances: + type: integer + port: + type: integer + type: object + copysets: + type: integer curveVersion: description: CurveVersionSpec represents the settings for the Curve version properties: image: type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a - container image - enum: - - IfNotPresent - - Always - - Never - - "" - type: string type: object + dataDir: + type: string etcd: description: EtcdSpec is the spec of etcd properties: @@ -79,7 +84,7 @@ spec: peerPort: type: integer type: object - hostDataDir: + logDir: type: string mds: description: MdsSpec is the spec of mds @@ -93,53 +98,17 @@ spec: port: type: integer type: object - monitor: - properties: - enable: - type: boolean - grafana: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - passWord: - type: string - userName: - type: string - type: object - monitorHost: - type: string - nodeExporter: - properties: - containerImage: - type: string - listenPort: - type: integer - type: object - prometheus: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - retentionSize: - type: string - retentionTime: - type: string - type: object - type: object nodes: items: type: string type: array - snapShotClone: + snapshotclone: description: SnapShotCloneSpec is the spec of snapshot clone properties: + config: + additionalProperties: + type: string + type: object dummyPort: type: integer enable: @@ -148,7 +117,7 @@ spec: type: integer proxyPort: type: integer - s3Config: + s3: description: S3ConfigSpec is the spec of s3 config properties: ak: @@ -161,52 +130,6 @@ spec: type: string type: object type: object - storage: - description: StorageScopeSpec is the spec of storage scope - 
properties: - copySets: - type: integer - devices: - items: - description: DevicesSpec represents a disk to use in the cluster - properties: - mountPath: - type: string - name: - type: string - percentage: - type: integer - type: object - type: array - nodes: - items: - type: string - type: array - port: - type: integer - selectedNodes: - items: - properties: - devices: - items: - description: DevicesSpec represents a disk to use in the - cluster - properties: - mountPath: - type: string - name: - type: string - percentage: - type: integer - type: object - type: array - node: - type: string - type: object - type: array - useSelectedNodes: - type: boolean - type: object type: object status: description: CurveClusterStatus defines the observed state of CurveCluster @@ -225,10 +148,6 @@ spec: description: Message is a human readable message indicating details about last transition. type: string - observedGeneration: - description: ObservedGeneration - format: int64 - type: integer reason: description: Reason is a unique, one-word, CamelCase reason for the condition's last transition. diff --git a/config/crd/bases/operator.curve.io_curvefs.yaml b/config/crd/bases/operator.curve.io_curvefs.yaml index f6c3a5a8..87270ff2 100644 --- a/config/crd/bases/operator.curve.io_curvefs.yaml +++ b/config/crd/bases/operator.curve.io_curvefs.yaml @@ -9,8 +9,11 @@ metadata: name: curvefs.operator.curve.io spec: additionalPrinterColumns: - - JSONPath: .spec.hostDataDir - name: HostDataDir + - JSONPath: .spec.dataDir + name: DataDir + type: string + - JSONPath: .spec.logDir + name: LogDir type: string - JSONPath: .spec.curveVersion.image name: Version @@ -46,27 +49,17 @@ spec: spec: description: CurvefsSpec defines the desired state of Curvefs properties: - cleanupConfirm: - description: Indicates user intent when deleting a cluster; blocks orchestration - and should not be set if cluster deletion is not imminent. 
- nullable: true - type: string + copysets: + type: integer curveVersion: description: CurveVersionSpec represents the settings for the Curve version properties: image: type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a - container image - enum: - - IfNotPresent - - Always - - Never - - "" - type: string type: object + dataDir: + type: string etcd: description: EtcdSpec is the spec of etcd properties: @@ -79,7 +72,7 @@ spec: peerPort: type: integer type: object - hostDataDir: + logDir: type: string mds: description: MdsSpec is the spec of mds @@ -100,81 +93,17 @@ spec: additionalProperties: type: string type: object - copySets: - type: integer externalPort: type: integer + instances: + type: integer port: type: integer type: object - monitor: - properties: - enable: - type: boolean - grafana: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - passWord: - type: string - userName: - type: string - type: object - monitorHost: - type: string - nodeExporter: - properties: - containerImage: - type: string - listenPort: - type: integer - type: object - prometheus: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - retentionSize: - type: string - retentionTime: - type: string - type: object - type: object nodes: items: type: string type: array - snapShotClone: - description: SnapShotCloneSpec is the spec of snapshot clone - properties: - dummyPort: - type: integer - enable: - type: boolean - port: - type: integer - proxyPort: - type: integer - s3Config: - description: S3ConfigSpec is the spec of s3 config - properties: - ak: - type: string - bucketName: - type: string - nosAddress: - type: string - sk: - type: string - type: object - type: object type: object status: description: CurvefsStatus defines the observed state of Curvefs @@ -193,10 +122,6 @@ spec: description: Message is a human readable message indicating 
details about last transition. type: string - observedGeneration: - description: ObservedGeneration - format: int64 - type: integer reason: description: Reason is a unique, one-word, CamelCase reason for the condition's last transition. @@ -211,19 +136,51 @@ spec: type: object type: array curveVersion: - description: CurveVersion shows curve version info on status field + description: CurveVersion shows curve version info on status field that + is used to judge whether to upgrade properties: image: type: string type: object + lastModContextSet: + description: LastModContextSet means that need to modify operation + context + properties: + modContextSet: + items: + properties: + parameters: + additionalProperties: + type: string + description: Parameter represents the parameters of modification + type: object + role: + description: Role represents the service role of modification + type: string + type: object + type: array + type: object message: description: Message shows summary message of cluster from ClusterState such as 'Curve Cluster Created successfully' type: string phase: - description: Phase is a summary of cluster state. It can be translated - from the last conditiontype + description: 'Phase is a summary of cluster state. 
It can be translated + from the last conditiontype ClusterPending: The cluster has been accepted + by system, but in the process ClusterRunning: The cluster is healthy + and is running process ClusterDeleting: The cluster is in deleting + process ClusterUnknown: The cluster state is unknown' type: string + storageStatusDir: + description: DataDir and LogDir is to compare and update + properties: + dataDir: + description: DataDir record the cluster data storage directory + type: string + logDir: + description: LogDir record the cluster log storage directory + type: string + type: object type: object type: object version: v1 diff --git a/config/crd/patches/webhook_in_curveclusters.yaml b/config/crd/patches/webhook_in_curveclusters.yaml index 8fe389ab..e681a7b3 100644 --- a/config/crd/patches/webhook_in_curveclusters.yaml +++ b/config/crd/patches/webhook_in_curveclusters.yaml @@ -12,6 +12,6 @@ spec: # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) caBundle: Cg== service: - namespace: curvebs + namespace: curve name: webhook-service path: /convert diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 2f64530f..52f21010 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -18,9 +18,9 @@ bases: - ../manager # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -#- ../webhook +- ../webhook # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. -#- ../certmanager +- ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 
#- ../prometheus @@ -32,39 +32,39 @@ patchesStrategicMerge: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml -#- manager_webhook_patch.yaml +- manager_webhook_patch.yaml # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection -#- webhookcainjection_patch.yaml +- webhookcainjection_patch.yaml # the following config is for teaching kustomize how to do var substitution vars: # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. -#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1alpha2 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldref: -# fieldpath: metadata.namespace -#- name: CERTIFICATE_NAME -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1alpha2 -# name: serving-cert # this name should match the one in certificate.yaml -#- name: SERVICE_NAMESPACE # namespace of the service -# objref: -# kind: Service -# version: v1 -# name: webhook-service -# fieldref: -# fieldpath: metadata.namespace -#- name: SERVICE_NAME -# objref: -# kind: Service -# version: v1 -# name: webhook-service +- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1alpha2 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace +- name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1alpha2 + name: serving-cert # this name should match the one in certificate.yaml +- name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + 
fieldref: + fieldpath: metadata.namespace +- name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index b178c849..8d4a50ca 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -4,7 +4,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: curve-operator - namespace: curvebs + namespace: curve spec: template: spec: diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml index dfe1bf96..c896c3d1 100644 --- a/config/default/manager_webhook_patch.yaml +++ b/config/default/manager_webhook_patch.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: curve-operator - namespace: curvebs + namespace: curve spec: template: spec: diff --git a/config/deploy/manifests.yaml b/config/deploy/manifests.yaml index 58c0793d..19b204d0 100644 --- a/config/deploy/manifests.yaml +++ b/config/deploy/manifests.yaml @@ -14,8 +14,11 @@ metadata: name: curveclusters.operator.curve.io spec: additionalPrinterColumns: - - JSONPath: .spec.hostDataDir - name: HostDataDir + - JSONPath: .spec.dataDir + name: DataDir + type: string + - JSONPath: .spec.logDir + name: LogDir type: string - JSONPath: .spec.curveVersion.image name: Version @@ -51,27 +54,29 @@ spec: spec: description: CurveClusterSpec defines the desired state of CurveCluster properties: - cleanupConfirm: - description: Indicates user intent when deleting a cluster; blocks orchestration - and should not be set if cluster deletion is not imminent. 
- nullable: true - type: string + chunkserver: + description: StorageScopeSpec is the spec of storage scope + properties: + config: + additionalProperties: + type: string + type: object + instances: + type: integer + port: + type: integer + type: object + copysets: + type: integer curveVersion: description: CurveVersionSpec represents the settings for the Curve version properties: image: type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a - container image - enum: - - IfNotPresent - - Always - - Never - - "" - type: string type: object + dataDir: + type: string etcd: description: EtcdSpec is the spec of etcd properties: @@ -84,7 +89,7 @@ spec: peerPort: type: integer type: object - hostDataDir: + logDir: type: string mds: description: MdsSpec is the spec of mds @@ -98,53 +103,17 @@ spec: port: type: integer type: object - monitor: - properties: - enable: - type: boolean - grafana: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - passWord: - type: string - userName: - type: string - type: object - monitorHost: - type: string - nodeExporter: - properties: - containerImage: - type: string - listenPort: - type: integer - type: object - prometheus: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - retentionSize: - type: string - retentionTime: - type: string - type: object - type: object nodes: items: type: string type: array - snapShotClone: + snapshotclone: description: SnapShotCloneSpec is the spec of snapshot clone properties: + config: + additionalProperties: + type: string + type: object dummyPort: type: integer enable: @@ -153,7 +122,7 @@ spec: type: integer proxyPort: type: integer - s3Config: + s3: description: S3ConfigSpec is the spec of s3 config properties: ak: @@ -166,52 +135,6 @@ spec: type: string type: object type: object - storage: - description: StorageScopeSpec is the spec of storage scope - 
properties: - copySets: - type: integer - devices: - items: - description: DevicesSpec represents a disk to use in the cluster - properties: - mountPath: - type: string - name: - type: string - percentage: - type: integer - type: object - type: array - nodes: - items: - type: string - type: array - port: - type: integer - selectedNodes: - items: - properties: - devices: - items: - description: DevicesSpec represents a disk to use in the - cluster - properties: - mountPath: - type: string - name: - type: string - percentage: - type: integer - type: object - type: array - node: - type: string - type: object - type: array - useSelectedNodes: - type: boolean - type: object type: object status: description: CurveClusterStatus defines the observed state of CurveCluster @@ -230,10 +153,6 @@ spec: description: Message is a human readable message indicating details about last transition. type: string - observedGeneration: - description: ObservedGeneration - format: int64 - type: integer reason: description: Reason is a unique, one-word, CamelCase reason for the condition's last transition. @@ -284,8 +203,11 @@ metadata: name: curvefs.operator.curve.io spec: additionalPrinterColumns: - - JSONPath: .spec.hostDataDir - name: HostDataDir + - JSONPath: .spec.dataDir + name: DataDir + type: string + - JSONPath: .spec.logDir + name: LogDir type: string - JSONPath: .spec.curveVersion.image name: Version @@ -321,27 +243,17 @@ spec: spec: description: CurvefsSpec defines the desired state of Curvefs properties: - cleanupConfirm: - description: Indicates user intent when deleting a cluster; blocks orchestration - and should not be set if cluster deletion is not imminent. 
- nullable: true - type: string + copysets: + type: integer curveVersion: description: CurveVersionSpec represents the settings for the Curve version properties: image: type: string - imagePullPolicy: - description: PullPolicy describes a policy for if/when to pull a - container image - enum: - - IfNotPresent - - Always - - Never - - "" - type: string type: object + dataDir: + type: string etcd: description: EtcdSpec is the spec of etcd properties: @@ -354,7 +266,7 @@ spec: peerPort: type: integer type: object - hostDataDir: + logDir: type: string mds: description: MdsSpec is the spec of mds @@ -375,81 +287,17 @@ spec: additionalProperties: type: string type: object - copySets: - type: integer externalPort: type: integer + instances: + type: integer port: type: integer type: object - monitor: - properties: - enable: - type: boolean - grafana: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - passWord: - type: string - userName: - type: string - type: object - monitorHost: - type: string - nodeExporter: - properties: - containerImage: - type: string - listenPort: - type: integer - type: object - prometheus: - properties: - containerImage: - type: string - dataDir: - type: string - listenPort: - type: integer - retentionSize: - type: string - retentionTime: - type: string - type: object - type: object nodes: items: type: string type: array - snapShotClone: - description: SnapShotCloneSpec is the spec of snapshot clone - properties: - dummyPort: - type: integer - enable: - type: boolean - port: - type: integer - proxyPort: - type: integer - s3Config: - description: S3ConfigSpec is the spec of s3 config - properties: - ak: - type: string - bucketName: - type: string - nosAddress: - type: string - sk: - type: string - type: object - type: object type: object status: description: CurvefsStatus defines the observed state of Curvefs @@ -468,10 +316,6 @@ spec: description: Message is a human readable message indicating 
details about last transition. type: string - observedGeneration: - description: ObservedGeneration - format: int64 - type: integer reason: description: Reason is a unique, one-word, CamelCase reason for the condition's last transition. @@ -486,19 +330,51 @@ spec: type: object type: array curveVersion: - description: CurveVersion shows curve version info on status field + description: CurveVersion shows curve version info on status field that + is used to judge whether to upgrade properties: image: type: string type: object + lastModContextSet: + description: LastModContextSet means that need to modify operation + context + properties: + modContextSet: + items: + properties: + parameters: + additionalProperties: + type: string + description: Parameter represents the parameters of modification + type: object + role: + description: Role represents the service role of modification + type: string + type: object + type: array + type: object message: description: Message shows summary message of cluster from ClusterState such as 'Curve Cluster Created successfully' type: string phase: - description: Phase is a summary of cluster state. It can be translated - from the last conditiontype + description: 'Phase is a summary of cluster state. 
It can be translated + from the last conditiontype ClusterPending: The cluster has been accepted + by system, but in the process ClusterRunning: The cluster is healthy + and is running process ClusterDeleting: The cluster is in deleting + process ClusterUnknown: The cluster state is unknown' type: string + storageStatusDir: + description: DataDir and LogDir is to compare and update + properties: + dataDir: + description: DataDir record the cluster data storage directory + type: string + logDir: + description: LogDir record the cluster log storage directory + type: string + type: object type: object type: object version: v1 @@ -715,6 +591,18 @@ subjects: name: curve-operator namespace: curve --- +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: curve +spec: + ports: + - port: 443 + targetPort: 9443 + selector: + control-plane: curve-operator +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -736,6 +624,15 @@ spec: curve: operator spec: containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true - args: - --enable-leader-election=true command: @@ -751,3 +648,120 @@ spec: memory: 100Mi serviceAccountName: curve-operator terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: serving-cert + namespace: curve +spec: + dnsNames: + - webhook-service.curve.svc + - webhook-service.curve.svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: curve +spec: + selfSigned: {} +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + annotations: + 
cert-manager.io/inject-ca-from: curve/serving-cert + name: mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: curve + path: /mutate-operator-curve-io-v1-curvecluster + failurePolicy: Fail + name: mcurvecluster.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - curveclusters +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: curve + path: /mutate-operator-curve-io-v1-curvefs + failurePolicy: Fail + name: mcurvefs.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - curvefs +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + annotations: + cert-manager.io/inject-ca-from: curve/serving-cert + name: validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: curve + path: /validate-operator-curve-io-v1-curvecluster + failurePolicy: Fail + name: vcurvecluster.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - curveclusters +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: curve + path: /validate-operator-curve-io-v1-curvefs + failurePolicy: Fail + name: vcurvefs.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - curvefs diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index 7759ed0a..6f7576f5 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -6,7 +6,7 @@ metadata: labels: control-plane: curve-operator name: curve-operator-metrics-monitor - namespace: curvebs + namespace: curve spec: endpoints: - path: /metrics diff --git 
a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index 639073b7..8b1da6dc 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: default - namespace: curvebs + namespace: curve diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 2db3e0f2..cb67517d 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -4,7 +4,7 @@ metadata: labels: control-plane: curve-operator name: curve-operator-metrics-service - namespace: curvebs + namespace: curve spec: ports: - name: https diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index cf22112f..26b58066 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: default - namespace: curvebs + namespace: curve diff --git a/config/samples/fscluster.yaml b/config/samples/fscluster.yaml index f7273821..e2c12e74 100644 --- a/config/samples/fscluster.yaml +++ b/config/samples/fscluster.yaml @@ -2,17 +2,12 @@ apiVersion: operator.curve.io/v1 kind: Curvefs metadata: name: my-fscluster - # The namespace to deploy CurveBS cluster. - # Curve operator is deployed in this namespace,Do not modify if not necessary namespace: curve spec: # The container image used to launch the Curve daemon pods(etcd, mds, metaserver). # v1.2 is Pacific and v1.3 is not tested. curveVersion: image: curve2operator/curvefs:monitor-v2.4.0-beta2 - # Container image pull policy, - # By default the pull policy of all containers in that pod will be set to IfNotPresent if it is not explicitly specified and no modification necessary. - imagePullPolicy: IfNotPresent # The K8s cluster nodes name in cluster that prepare to deploy Curve daemon pods(etcd, mds, metaserver). 
# For stand-alone deployment, set one host here and see fscluster-onehost.yaml # - node1 -> etcd-a, mds-a, metaserver-a @@ -24,7 +19,9 @@ spec: - curve-operator-node3 # DataDirHostPath and LogDirHostPath where data files and log files will be persisted on host machine. Must be specified. # If you reinstall the cluster, make surce that you delete this directory from each host. - hostDataDir: /curvefs + dataDir: /curvefs + logDir: /curvefs + copySets: 100 etcd: # Port for listening to partner communication. # Etcd member accept incoming requests from its peers on a specific scheme://IP:port combination and the IP is host ip because we use hostnetwork:true. @@ -36,22 +33,4 @@ spec: dummyPort: 7700 metaserver: port: 16800 - externalPort: 16800 - copySets: 100 - monitor: - enable: false - monitorHost: curve-operator-node1 - nodeExporter: - containerImage: prom/node-exporter:latest - listenPort: 9100 - prometheus: - containerImage: prom/prometheus:latest - dataDir: /tmp/monitor/prometheus - listenPort: 9090 - retentionTime: 7d - retentionSize: 256GB - grafana: - containerImage: grafana/grafana:latest - dataDir: /tmp/monitor/grafana - listenPort: 3000 - \ No newline at end of file + externalPort: 16800 \ No newline at end of file diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 00000000..7dd109c1 --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,90 @@ + +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + creationTimestamp: null + name: mutating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /mutate-operator-curve-io-v1-curvecluster + failurePolicy: Fail + name: mcurvecluster.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - curveclusters +- clientConfig: + caBundle: Cg== + service: + name: 
webhook-service + namespace: system + path: /mutate-operator-curve-io-v1-curvefs + failurePolicy: Fail + name: mcurvefs.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - curvefs + +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + name: validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-operator-curve-io-v1-curvecluster + failurePolicy: Fail + name: vcurvecluster.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - curveclusters +- clientConfig: + caBundle: Cg== + service: + name: webhook-service + namespace: system + path: /validate-operator-curve-io-v1-curvefs + failurePolicy: Fail + name: vcurvefs.kb.io + rules: + - apiGroups: + - operator.curve.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + - DELETE + resources: + - curvefs diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index d3a863c6..9784b7d4 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: Service metadata: name: webhook-service - namespace: curvebs + namespace: curve spec: ports: - port: 443 diff --git a/main.go b/main.go index 9207f836..0244d6ff 100644 --- a/main.go +++ b/main.go @@ -67,9 +67,8 @@ func main() { cmd := &cobra.Command{ Use: "curve-operator", - // TODO: Rewrite this long message. Long: `The Curve-Operator is a daemon to deploy Curve and auto it on kubernetes. 
- It support for Curve storage to natively integrate with cloud-native environments.`, + It supports Curve storage to natively integrate with cloud-native environments.`, Run: func(cmd *cobra.Command, args []string) { setupLog.Error(opts.Run(), "failed to run curve-operator") os.Exit(1) @@ -90,16 +89,10 @@ func (opts *CurveOptions) Run() error { config := ctrl.GetConfigOrDie() clientSet, err := kubernetes.NewForConfig(config) if err != nil { - setupLog.Error(err, "create clientset failed") + setupLog.Error(err, "failed to create clientset") os.Exit(1) } - // Create clusterd context - context := clusterd.Context{ - KubeConfig: config, - Clientset: clientSet, - } - mgr, err := ctrl.NewManager(config, ctrl.Options{ Scheme: scheme, MetricsBindAddress: opts.MetricsAddr, @@ -112,6 +105,12 @@ func (opts *CurveOptions) Run() error { os.Exit(1) } + context := clusterd.Context{ + KubeConfig: config, + Clientset: clientSet, + Client: mgr.GetClient(), + } + if err = (controllers.NewCurveClusterReconciler( mgr.GetClient(), ctrl.Log.WithName("controllers").WithName("CurveCluster"), @@ -130,6 +129,14 @@ func (opts *CurveOptions) Run() error { setupLog.Error(err, "unable to create controller", "controller", "CurvefsCluster") os.Exit(1) } + if err = (&operatorv1.CurveCluster{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "CurveCluster") + os.Exit(1) + } + if err = (&operatorv1.Curvefs{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "Curvefs") + os.Exit(1) + } // +kubebuilder:scaffold:builder setupLog.Info("starting manager") diff --git a/pkg/chunkserver/chunkfilepool.go b/pkg/chunkserver/chunkfilepool.go deleted file mode 100644 index 241bf7c5..00000000 --- a/pkg/chunkserver/chunkfilepool.go +++ /dev/null @@ -1,320 +0,0 @@ -package chunkserver - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" - batch "k8s.io/api/batch/v1" - v1 
"k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/chunkserver/script" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - PrepareJobName = "prepare-chunkfile" - DEFAULT_CHUNKFILE_SIZE = 16 * 1024 * 1024 // 16MB - - formatConfigMapName = "format-chunkfile-conf" - formatScriptFileDataKey = "format.sh" - formatScriptMountPath = "/curvebs/tools/sbin/format.sh" -) - -type storageNodeInfo struct { - nodeName string - nodeIP string -} - -type Job2DeviceInfo struct { - job *batch.Job - device *curvev1.DevicesSpec - nodeName string -} - -// global variables -var job2DeviceInfos []*Job2DeviceInfo -var chunkserverConfigs []chunkserverConfig - -// startProvisioningOverNodes format device and provision chunk files -func (c *Cluster) startProvisioningOverNodes(nodesInfo []daemon.NodeInfo, globakDCs []*topology.DeployConfig) ([]*topology.DeployConfig, []*topology.DeployConfig, error) { - dcs := []*topology.DeployConfig{} - if !c.Chunkserver.UseSelectedNodes { - // clear slice - job2DeviceInfos = []*Job2DeviceInfo{} - chunkserverConfigs = []chunkserverConfig{} - hostnameMap, err := k8sutil.GetNodeHostNames(c.Context.Clientset) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to get node hostnames") - } - - var storageNodes []string - for _, nodeName := range c.Chunkserver.Nodes { - storageNodes = append(storageNodes, hostnameMap[nodeName]) - } - - // get valid nodes that ready status and is schedulable - validNodes, _ := k8sutil.GetValidNodes(c.Context, storageNodes) - if len(validNodes) == 0 { - logger.Warningf("no valid nodes available to run chunkservers on nodes in namespace %q", c.NamespacedName.Namespace) - return nil, nil, nil - } - logger.Infof("%d of the %d storage nodes are 
valid", len(validNodes), len(c.Chunkserver.Nodes)) - - storageNodeInfos := []storageNodeInfo{} - for _, node := range validNodes { - nodeIP := "" - for _, address := range node.Status.Addresses { - if address.Type == "InternalIP" { - nodeIP = address.Address - } - } - storageNodeInfos = append(storageNodeInfos, storageNodeInfo{ - nodeName: node.Name, - nodeIP: nodeIP, - }) - } - - err = c.UpdateSpecRoleAllConfigMap(config.ChunkserverAllConfigMapName, formatScriptFileDataKey, script.FORMAT, nil) - if err != nil { - return nil, nil, err - } - - // get ClusterEtcdAddr - etcdOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.EtcdOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to get etcd override endoints configmap") - } - clusterEtcdAddr := etcdOverrideCM.Data[config.ClusterEtcdAddr] - - // get ClusterMdsAddr - mdsOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.MdsOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to get mds override endoints configmap") - } - clusterMdsAddr := mdsOverrideCM.Data[config.MdsOvverideConfigMapDataKey] - clusterMdsDummyPort := mdsOverrideCM.Data[config.ClusterMdsDummyPort] - - // get clusterSnapCloneAddr and clusterSnapShotCloneDummyPort - var clusterSnapCloneAddr string - var clusterSnapShotCloneDummyPort string - if c.SnapShotClone.Enable { - for _, node := range nodesInfo { - clusterSnapCloneAddr = fmt.Sprint(clusterSnapCloneAddr, node.NodeIP, ":", node.SnapshotClonePort, ",") - clusterSnapShotCloneDummyPort = fmt.Sprint(clusterSnapShotCloneDummyPort, strconv.Itoa(node.SnapshotCloneDummyPort), ",") - } - clusterSnapCloneAddr = strings.TrimRight(clusterSnapCloneAddr, ",") - clusterSnapShotCloneDummyPort = strings.TrimRight(clusterSnapShotCloneDummyPort, ",") - } - - hostSequence, daemonID := 0, 0 - var daemonIDString string - // 
travel all valid nodes to start job to prepare chunkfiles - for _, node := range storageNodeInfos { - portBase := c.Chunkserver.Port - replicasSequence := 0 - // travel all device to run format job and construct chunkserverConfig - for _, device := range c.Chunkserver.Devices { - daemonIDString = k8sutil.IndexToName(daemonID) - name := strings.TrimSpace(device.Name) - name = strings.TrimRight(name, "/") - nameArr := strings.Split(name, "/") - name = nameArr[len(nameArr)-1] - resourceName := fmt.Sprintf("%s-%s-%s", AppName, node.nodeName, name) - currentConfigMapName := fmt.Sprintf("%s-%s-%s", ConfigMapNamePrefix, node.nodeName, name) - - logger.Infof("creating job for device %q on host %q", device.Name, node.nodeName) - - job, err := c.runPrepareJob(node.nodeName, device) - if err != nil { - return nil, nil, err - } - - jobInfo := &Job2DeviceInfo{ - job, - &device, - node.nodeName, - } - // jobsArr record all the job that have started, to determine whether the format is completed - job2DeviceInfos = append(job2DeviceInfos, jobInfo) - - // create chunkserver config for each device of every node - chunkserverConfig := chunkserverConfig{ - Prefix: Prefix, - Port: portBase, - ClusterMdsAddr: clusterMdsAddr, - ClusterMdsDummyPort: clusterMdsDummyPort, - ClusterEtcdAddr: clusterEtcdAddr, - ClusterSnapshotcloneAddr: clusterSnapCloneAddr, - ClusterSnapshotcloneDummyPort: clusterSnapShotCloneDummyPort, - - ResourceName: resourceName, - DaemonId: daemonIDString, - CurrentConfigMapName: currentConfigMapName, - DataPathMap: &chunkserverDataPathMap{ - HostDevice: device.Name, - HostLogDir: c.LogDirHostPath + "/chunkserver-" + node.nodeName + "-" + name, - ContainerDataDir: ChunkserverContainerDataDir, - ContainerLogDir: ChunkserverContainerLogDir, - }, - NodeName: node.nodeName, - NodeIP: node.nodeIP, - DeviceName: device.Name, - HostSequence: hostSequence, - ReplicasSequence: replicasSequence, - Replicas: len(c.Chunkserver.Devices), - } - - dc := &topology.DeployConfig{ - 
Kind: c.Kind, - Role: config.ROLE_CHUNKSERVER, - Copysets: c.Chunkserver.CopySets, - NodeName: node.nodeName, - NodeIP: node.nodeIP, - Port: portBase, - DeviceName: device.Name, - ReplicasSequence: replicasSequence, - Replicas: len(c.Chunkserver.Devices), - StandAlone: len(storageNodeInfos) == 1, - } - chunkserverConfigs = append(chunkserverConfigs, chunkserverConfig) - dcs = append(dcs, dc) - globakDCs = append(globakDCs, dc) - portBase++ - replicasSequence++ - daemonID++ - } - hostSequence++ - } - } - - return dcs, globakDCs, nil -} - -// runPrepareJob create job and run job -func (c *Cluster) runPrepareJob(nodeName string, device curvev1.DevicesSpec) (*batch.Job, error) { - job, _ := c.makeJob(nodeName, device) - err := k8sutil.RunReplaceableJob(context.TODO(), c.Context.Clientset, job, false) - if err != nil { - return &batch.Job{}, err - } - - return job, nil -} - -func (c *Cluster) makeJob(nodeName string, device curvev1.DevicesSpec) (*batch.Job, error) { - volumes, volumeMounts := c.createFormatVolumeAndMount(device) - - name := strings.TrimSpace(device.Name) - name = strings.TrimRight(name, "/") - nameArr := strings.Split(name, "/") - name = nameArr[len(nameArr)-1] - - jobName := PrepareJobName + "-" + nodeName + "-" + name - podName := PrepareJobName + "-" + nodeName - - runAsUser := int64(0) - runAsNonRoot := false - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Labels: c.getPodLabels(nodeName, device.Name), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.makeFormatContainer(device, volumeMounts), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyOnFailure, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - }, - }, - } - - job := &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: c.NamespacedName.Namespace, - Labels: 
c.getPodLabels(nodeName, device.Name), - }, - Spec: batch.JobSpec{ - Template: podSpec, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(job) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to job %q", job.Name) - } - - return job, nil -} - -func (c *Cluster) makeFormatContainer(device curvev1.DevicesSpec, volumeMounts []v1.VolumeMount) v1.Container { - privileged := true - runAsUser := int64(0) - runAsNonRoot := false - readOnlyRootFilesystem := false - - argsPercent := strconv.Itoa(device.Percentage) - argsFileSize := strconv.Itoa(DEFAULT_CHUNKFILE_SIZE) - argsFilePoolDir := ChunkserverContainerDataDir + "/chunkfilepool" - argsFilePoolMetaPath := ChunkserverContainerDataDir + "/chunkfilepool.meta" - - container := v1.Container{ - Name: "format", - Args: []string{ - device.Name, - ChunkserverContainerDataDir, - argsPercent, - argsFileSize, - argsFilePoolDir, - argsFilePoolMetaPath, - }, - Command: []string{ - "/bin/bash", - formatScriptMountPath, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: volumeMounts, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - }, - } - - return container -} - -func (c *Cluster) getPodLabels(nodeName, deviceName string) map[string]string { - labels := make(map[string]string) - labels["app"] = PrepareJobName - labels["node"] = nodeName - s := strings.Split(deviceName, "/") - if len(s) > 1 { - deviceName = s[1] - } else { - // not occur - deviceName = nodeName - } - labels["device"] = deviceName - labels["curve_cluster"] = c.NamespacedName.Namespace - return labels -} diff --git a/pkg/chunkserver/chunkserver.go b/pkg/chunkserver/chunkserver.go deleted file mode 100644 index 05bd3620..00000000 --- a/pkg/chunkserver/chunkserver.go +++ /dev/null @@ -1,196 +0,0 @@ -package chunkserver - -import ( - 
"context" - "time" - - "emperror.dev/errors" - "github.com/coreos/pkg/capnslog" - apps "k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/chunkserver/script" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - AppName = "curve-chunkserver" - ConfigMapNamePrefix = "curve-chunkserver-conf" - - Prefix = "/curvebs/chunkserver" - ChunkserverContainerDataDir = "/curvebs/chunkserver/data" - ChunkserverContainerLogDir = "/curvebs/chunkserver/logs" - - // start.sh - startChunkserverConfigMapName = "start-chunkserver-conf" - startChunkserverScriptFileDataKey = "start_chunkserver.sh" - startChunkserverMountPath = "/curvebs/tools/sbin/start_chunkserver.sh" -) - -type Cluster struct { - *daemon.Cluster -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "chunkserver") - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -// Start begins the chunkserver daemon -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo, globalDCs []*topology.DeployConfig) ([]*topology.DeployConfig, error) { - logger.Infof("start running chunkserver in namespace %q", c.Namespace) - - err := c.CreateSpecRoleAllConfigMap(config.ROLE_CHUNKSERVER, config.ChunkserverAllConfigMapName) - if err != nil { - return nil, err - } - - err = c.CreateSpecRoleAllConfigMap(config.ROLE_SNAPSHOTCLONE, config.SnapShotCloneAllConfigMapName) - if err != nil { - return nil, err - } - // startProvisioningOverNodes format device and prepare chunk files - dcs, globalDCs, err := c.startProvisioningOverNodes(nodesInfo, globalDCs) - if err != nil { - return nil, err - } - - err = c.WaitForForamtJobCompletion(context.TODO(), 24*time.Hour) - if err != nil { - return nil, err - } - - 
k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeFormatedReady, curvev1.ConditionTrue, curvev1.ConditionFormatChunkfilePoolReason, "Formating chunkfilepool successed") - logger.Info("all jobs run completed in 24 hours") - - // create tool ConfigMap - err = c.createToolConfigMap() - if err != nil { - return nil, err - } - - // create topology ConfigMap - err = topology.CreateTopoConfigMap(c.Cluster, dcs) - if err != nil { - return nil, err - } - - // create physical pool - _, err = topology.RunCreatePoolJob(c.Cluster, dcs, topology.PYHSICAL_POOL) - if err != nil { - return nil, err - } - logger.Info("The physical pool was created successfully") - - // start all chunkservers for each device of every node - err = c.startChunkServers() - if err != nil { - return nil, err - } - - // wait all chunkservers online before create logical pool - logger.Info("starting all chunkserver") - k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeChunkServerReady, curvev1.ConditionTrue, curvev1.ConditionChunkServerClusterCreatedReason, "Chunkserver cluster has been created") - time.Sleep(30 * time.Second) - - // create logical pool - _, err = topology.RunCreatePoolJob(c.Cluster, dcs, topology.LOGICAL_POOL) - if err != nil { - return nil, err - } - logger.Info("create logical pool successed") - - return globalDCs, nil -} - -// startChunkServers start all chunkservers for each device of every node -func (c *Cluster) startChunkServers() error { - err := c.preStart() - if err != nil { - return err - } - - var deploymentsToWaitFor []*apps.Deployment - for _, csConfig := range chunkserverConfigs { - err := c.CreateEachConfigMap(config.ChunkserverConfigMapDataKey, &csConfig, csConfig.CurrentConfigMapName) - if err != nil { - return err - } - - d, err := c.makeDeployment(&csConfig) - if err != nil { - return err - } - - newDeployment, err := 
c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create chunkserver deployment %s", csConfig.ResourceName) - } - logger.Infof("deployment for chunkserver %s already exists. updating if needed", csConfig.ResourceName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.Context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return err - } - } - - return nil -} - -// preStart -func (c *Cluster) preStart() error { - if len(job2DeviceInfos) == 0 { - logger.Errorf("no job to format device and provision chunk file") - return nil - } - - if len(chunkserverConfigs) == 0 { - logger.Errorf("no device need to start chunkserver") - return nil - } - - if len(job2DeviceInfos) != len(chunkserverConfigs) { - return errors.New("failed to start chunkserver because of job numbers is not equal with chunkserver config") - } - - err := c.UpdateSpecRoleAllConfigMap(config.ChunkserverAllConfigMapName, startChunkserverScriptFileDataKey, script.START, nil) - if err != nil { - return err - } - - // if c.SnapShotClone.Enable { - s3Data, err := c.getS3ConfigMapData() - if err != nil { - return err - } - - err = c.UpdateSpecRoleAllConfigMap(config.SnapShotCloneAllConfigMapName, config.S3ConfigMapDataKey, s3Data, nil) - if err != nil { - return err - } - // } - - err = c.UpdateSpecRoleAllConfigMap(config.ChunkserverAllConfigMapName, 
config.CSClientConfigMapDataKey, "", &chunkserverConfigs[0]) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/chunkserver/config.go b/pkg/chunkserver/config.go deleted file mode 100644 index d0653dfb..00000000 --- a/pkg/chunkserver/config.go +++ /dev/null @@ -1,79 +0,0 @@ -package chunkserver - -import ( - "strconv" - - "github.com/opencurve/curve-operator/pkg/config" -) - -// chunkserverConfig implements config.ConfigInterface -var _ config.ConfigInterface = &chunkserverConfig{} - -// chunkserverConfig for a single chunkserver -type chunkserverConfig struct { - Prefix string - Port int // chunkserver.conf(service_port) - ClusterMdsAddr string // chunkserver.conf, snap_client.conf, tools.conf - ClusterMdsDummyPort string // tools.conf - ClusterEtcdAddr string // tools.conf - ClusterSnapshotcloneAddr string // tools.conf - ClusterSnapshotcloneDummyPort string // tools.conf - - DataPathMap *chunkserverDataPathMap - ResourceName string - DaemonId string - CurrentConfigMapName string - DeviceName string - NodeName string - NodeIP string - HostSequence int - ReplicasSequence int - Replicas int -} - -// chunkserverDataPathMap represents the device on host and referred Mount Path in container -type chunkserverDataPathMap struct { - // HostDevice is the device name such as '/dev/sdb' - HostDevice string - - // HostLogDir - HostLogDir string - - // ContainerDataDir is the data dir of chunkserver such as '/curvebs/chunkserver/data/' - ContainerDataDir string - - // ContainerLogDir is the log dir of chunkserver such as '/curvebs/chunkserver/logs' - ContainerLogDir string -} - -func (c *chunkserverConfig) GetPrefix() string { return c.Prefix } -func (c *chunkserverConfig) GetServiceId() string { return "" } -func (c *chunkserverConfig) GetServiceRole() string { return "" } -func (c *chunkserverConfig) GetServiceHost() string { return "" } -func (c *chunkserverConfig) GetServiceHostSequence() string { return "" } -func (c *chunkserverConfig) 
GetServiceReplicaSequence() string { return "" } -func (c *chunkserverConfig) GetServiceReplicasSequence() string { return "" } -func (c *chunkserverConfig) GetServiceAddr() string { return "" } -func (c *chunkserverConfig) GetServicePort() string { return strconv.Itoa(c.Port) } -func (c *chunkserverConfig) GetServiceClientPort() string { return "" } -func (c *chunkserverConfig) GetServiceDummyPort() string { return "" } -func (c *chunkserverConfig) GetServiceProxyPort() string { return "" } -func (c *chunkserverConfig) GetServiceExternalAddr() string { return "" } -func (c *chunkserverConfig) GetServiceExternalPort() string { return "" } -func (c *chunkserverConfig) GetLogDir() string { return "" } -func (c *chunkserverConfig) GetDataDir() string { return "" } - -// cluster -func (c *chunkserverConfig) GetClusterEtcdHttpAddr() string { return "" } -func (c *chunkserverConfig) GetClusterEtcdAddr() string { return c.ClusterEtcdAddr } -func (c *chunkserverConfig) GetClusterMdsAddr() string { return c.ClusterMdsAddr } -func (c *chunkserverConfig) GetClusterMdsDummyAddr() string { return "" } -func (c *chunkserverConfig) GetClusterMdsDummyPort() string { return c.ClusterMdsDummyPort } -func (c *chunkserverConfig) GetClusterChunkserverAddr() string { return "" } -func (c *chunkserverConfig) GetClusterMetaserverAddr() string { return "" } -func (c *chunkserverConfig) GetClusterSnapshotcloneAddr() string { return c.ClusterSnapshotcloneAddr } -func (c *chunkserverConfig) GetClusterSnapshotcloneProxyAddr() string { return "" } -func (c *chunkserverConfig) GetClusterSnapshotcloneNginxUpstream() string { return "" } -func (c *chunkserverConfig) GetClusterSnapshotcloneDummyPort() string { - return c.ClusterSnapshotcloneDummyPort -} diff --git a/pkg/chunkserver/format_status.go b/pkg/chunkserver/format_status.go deleted file mode 100644 index 845416d7..00000000 --- a/pkg/chunkserver/format_status.go +++ /dev/null @@ -1,176 +0,0 @@ -package chunkserver - -import ( - "bytes" - 
"context" - "fmt" - "regexp" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" -) - -type device2Use struct { - nodeName string - deviceName string - devicePercent int - status string - usePercent int -} - -// WaitForForamtJobCompletion waits for a format job to reach the completed state. -// only one pod in one job -func (c *Cluster) WaitForForamtJobCompletion(ctx context.Context, timeout time.Duration) error { - retry := 0 - return wait.Poll(30*time.Second, timeout, func() (bool, error) { - du, completed, err := c.getJob2DeviceFormatProgress() - if err != nil { - logger.Errorf("failed to get device format progress %v", err) - return false, err - } - if completed { - return true, nil - } - - c.printProgress(retry, du) - retry++ - - return false, nil - }) -} - -// getJobFormatStatus gets one device(one job) usage that represents format progress -func (c *Cluster) getJob2DeviceFormatProgress() ([]device2Use, bool, error) { - device2UseArr := []device2Use{} - completed := 0 - for _, watchedJob2DeviceInfo := range job2DeviceInfos { - watchedJob := watchedJob2DeviceInfo.job - watchedNodeName := watchedJob2DeviceInfo.nodeName - wathedDevice := watchedJob2DeviceInfo.device - job, err := c.Context.Clientset.BatchV1().Jobs(c.NamespacedName.Namespace).Get(watchedJob.Name, metav1.GetOptions{}) - if err != nil { - return []device2Use{}, false, errors.Wrapf(err, "failed to get job %q in cluster", watchedJob.Name) - } - - if job.Status.Succeeded > 0 { - completed++ - if completed == len(job2DeviceInfos) { - logger.Info("all format jobs has finished.") - return device2UseArr, true, nil - } - continue - } - - labels := c.getPodLabels(watchedNodeName, wathedDevice.Name) - var labelSelector []string - for k, v := range labels { - labelSelector = append(labelSelector, k+"="+v) - } - 
selector := strings.Join(labelSelector, ",") - podList, _ := c.Context.Clientset.CoreV1().Pods(watchedJob.Namespace).List(metav1.ListOptions{ - LabelSelector: selector, - }) - if len(podList.Items) < 1 { - // not occur - logger.Warningf("no pod for job %q", watchedJob.Name) - continue - } - - // one job one pod one container - pod := podList.Items[0] - du, err := c.getDevUsedbyExecRequest(&pod, watchedNodeName, wathedDevice.Name, wathedDevice.Percentage, "Formatting") - if err != nil { - return []device2Use{}, false, errors.Wrap(err, "failed to get disk used percentage using exec request") - } - device2UseArr = append(device2UseArr, du) - } - - return device2UseArr, false, nil -} - -func (c *Cluster) getDevUsedbyExecRequest(pod *v1.Pod, nodeName, deviceName string, devicePercent int, status string) (device2Use, error) { - var ( - execOut bytes.Buffer - execErr bytes.Buffer - ) - req := c.Context.Clientset.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(pod.Name). - Namespace(pod.Namespace). 
- SubResource("exec") - req.VersionedParams(&v1.PodExecOptions{ - Container: pod.Spec.Containers[0].Name, - Command: []string{"df", "-h", deviceName}, - Stdout: true, - Stderr: true, - }, scheme.ParameterCodec) - - exec, err := remotecommand.NewSPDYExecutor(c.Context.KubeConfig, "POST", req.URL()) - if err != nil { - return device2Use{}, fmt.Errorf("failed to init executor: %v", err) - } - - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: &execOut, - Stderr: &execErr, - Tty: false, - }) - - if err != nil { - return device2Use{}, fmt.Errorf("could not execute: %v", err) - } - - if execErr.Len() > 0 { - return device2Use{}, fmt.Errorf("stderr: %v", execErr.String()) - } - - cmdOutput := execOut.String() - re := regexp.MustCompile(`\S+\s+\S+\s+\S+\s+\S+\s+(?P\d+)%`) - use := 0 - match := re.FindStringSubmatch(cmdOutput) - if len(match) > 1 { - useStr := match[re.SubexpIndex("use")] - use, err = strconv.Atoi(useStr) - if err != nil { - return device2Use{}, err - } - } else { - logger.Info("Use value not found.") - } - - if use > devicePercent { - status = "Done" - } - deviceFormatInfo := device2Use{ - nodeName: nodeName, - deviceName: deviceName, - devicePercent: devicePercent, - status: status, - usePercent: use, - } - - return deviceFormatInfo, nil -} - -func (c *Cluster) printProgress(retry int, device2UseArr []device2Use) { - if retry != 0 { - fmt.Printf("\033[%dA", len(device2UseArr)) - } - - for _, device2Use := range device2UseArr { - logger.Infof("node=%s\tdevice=%s\tformatted=%d/%d\tstatus=%s", - device2Use.nodeName, - device2Use.deviceName, - device2Use.usePercent, - device2Use.devicePercent, - device2Use.status, - ) - } -} diff --git a/pkg/chunkserver/script/format.go b/pkg/chunkserver/script/format.go deleted file mode 100644 index e126755b..00000000 --- a/pkg/chunkserver/script/format.go +++ /dev/null @@ -1,22 +0,0 @@ -package script - -var FORMAT = ` -device_name=$1 -device_mount_path=$2 -percent=$3 -chunkfile_size=$4 -chunkfile_pool_dir=$5 
-chunkfile_pool_meta_path=$6 - -mkfs.ext4 $device_name -mount $device_name $device_mount_path - -cd /curvebs/tools/sbin - -./curve_format \ - -allocatePercent=$percent \ - -fileSize=$chunkfile_size \ - -filePoolDir=$chunkfile_pool_dir \ - -filePoolMetaPath=$chunkfile_pool_meta_path \ - -fileSystemPath=$chunkfile_pool_dir -` diff --git a/pkg/chunkserver/script/start.go b/pkg/chunkserver/script/start.go deleted file mode 100644 index d75433be..00000000 --- a/pkg/chunkserver/script/start.go +++ /dev/null @@ -1,43 +0,0 @@ -package script - -var START = ` -device_name=$1 -device_mount_path=$2 -data_dir=$3 -node_ip=$4 -service_port=$5 -conf_path=$6 - -mkdir -p $device_mount_path -mount $device_name $device_mount_path - - -# for test -# while true; do echo hello; sleep 10;done - -cd /curvebs/chunkserver/sbin -./curvebs-chunkserver \ - -conf="${conf_path}" \ - -enableExternalServer=false \ - -copySetUri=local://"${data_dir}"/copysets \ - -raftLogUri=curve://"${data_dir}"/copysets \ - -raftSnapshotUri=curve://"${data_dir}"/copysets \ - -raft_sync_segments=true \ - -raft_max_install_snapshot_tasks_num=1 \ - -chunkServerIp=${node_ip} \ - -chunkFilePoolDir="${data_dir}" \ - -walFilePoolDir="${data_dir}" \ - -raft_sync=true \ - -raft_max_segment_size=8388608 \ - -raft_use_fsync_rather_than_fdatasync=false \ - -chunkFilePoolMetaPath="${data_dir}"/chunkfilepool.meta \ - -chunkServerStoreUri=local://"${data_dir}" \ - -chunkServerMetaUri=local://"${data_dir}"/chunkserver.dat \ - -bthread_concurrency=18 \ - -raft_sync_meta=true \ - -chunkServerExternalIp=${node_ip} \ - -chunkServerPort=${service_port} \ - -walFilePoolMetaPath="${data_dir}"/walfilepool.meta \ - -recycleUri=local://"${data_dir}"/recycler \ - -graceful_quit_on_sigterm=true -` diff --git a/pkg/chunkserver/spec.go b/pkg/chunkserver/spec.go deleted file mode 100644 index 15dc9bbc..00000000 --- a/pkg/chunkserver/spec.go +++ /dev/null @@ -1,130 +0,0 @@ -package chunkserver - -import ( - "path" - "strconv" - - apps 
"k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/logrotate" - "github.com/opencurve/curve-operator/pkg/topology" -) - -func (c *Cluster) makeDeployment(csConfig *chunkserverConfig) (*apps.Deployment, error) { - volumes := CSDaemonVolumes(csConfig) - vols, _ := topology.CreateTopoAndToolVolumeAndMount(c.Cluster) - volumes = append(volumes, vols...) - labels := daemon.CephDaemonAppLabels(AppName, c.Namespace, "chunkserver", csConfig.DaemonId, c.Kind) - - // add log config volume - logConfCMVolSource := &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "log-conf"}} - volumes = append(volumes, v1.Volume{Name: "log-conf", VolumeSource: v1.VolumeSource{ConfigMap: logConfCMVolSource}}) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: csConfig.ResourceName, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.makeCSDaemonContainer(csConfig), - logrotate.MakeLogrotateContainer(), - }, - NodeName: csConfig.NodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: csConfig.ResourceName, - Namespace: c.NamespacedName.Namespace, - Labels: labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, err - } - - return d, nil -} - -// makeCSDaemonContainer create chunkserver container -func (c *Cluster) makeCSDaemonContainer(csConfig *chunkserverConfig) 
v1.Container { - - privileged := true - runAsUser := int64(0) - runAsNonRoot := false - readOnlyRootFilesystem := false - - // volumemount - volMounts := CSDaemonVolumeMounts(csConfig) - _, mounts := topology.CreateTopoAndToolVolumeAndMount(c.Cluster) - volMounts = append(volMounts, mounts...) - - argsDeviceName := csConfig.DeviceName - argsMountPath := ChunkserverContainerDataDir - - argsDataDir := path.Join(csConfig.Prefix, "data") - argsChunkServerIp := csConfig.NodeIP - argsChunkserverPort := strconv.Itoa(csConfig.Port) - argsConfigFileMountPath := path.Join(config.ChunkserverConfigMapMountPathDir, config.ChunkserverConfigMapDataKey) - - container := v1.Container{ - Name: "chunkserver", - Command: []string{ - "/bin/bash", - startChunkserverMountPath, - }, - Args: []string{ - argsDeviceName, - argsMountPath, - argsDataDir, - argsChunkServerIp, - argsChunkserverPort, - argsConfigFileMountPath, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: volMounts, - Ports: []v1.ContainerPort{ - { - Name: "listen-port", - ContainerPort: int32(csConfig.Port), - HostPort: int32(csConfig.Port), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - }, - } - - return container -} diff --git a/pkg/chunkserver/tool.go b/pkg/chunkserver/tool.go deleted file mode 100644 index 6810cdd6..00000000 --- a/pkg/chunkserver/tool.go +++ /dev/null @@ -1,102 +0,0 @@ -package chunkserver - -import ( - "strings" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (c *Cluster) createToolConfigMap() error { - // get mds-conf-template from cluster - toolsCMTemplate, err := 
c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(config.DefaultConfigMapName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to get configmap %s from cluster", config.DefaultConfigMapName) - if kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get configmap %s from cluster", config.DefaultConfigMapName) - } - return errors.Wrapf(err, "failed to get configmap %s from cluster", config.DefaultConfigMapName) - } - toolsCMData := toolsCMTemplate.Data[config.ToolsConfigMapDataKey] - replacedToolsData, err := config.ReplaceConfigVars(toolsCMData, &chunkserverConfigs[0]) - if err != nil { - return err - } - - toolConfigMap := map[string]string{ - config.ToolsConfigMapDataKey: replacedToolsData, - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.ToolsConfigMapName, - Namespace: c.Namespace, - }, - Data: toolConfigMap, - } - - err = c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return err - } - - // Create topology-json-conf configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create tools-conf configmap in namespace %s", c.Namespace) - } - - return nil -} - -func (c *Cluster) getS3ConfigMapData() (string, error) { - s3CMTemplate, err := c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(config.DefaultConfigMapName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to get configmap %s from cluster", config.DefaultConfigMapName) - if kerrors.IsNotFound(err) { - return "", errors.Wrapf(err, "failed to get configmap %s from cluster", config.DefaultConfigMapName) - } - return "", errors.Wrapf(err, "failed to get configmap %s from cluster", config.DefaultConfigMapName) - } - - s3Data := s3CMTemplate.Data[config.S3ConfigMapDataKey] - s3MapData := translateS3StringToMap(s3Data) - s3MapData["s3.ak"] = c.SnapShotClone.S3Config.AK - s3MapData["s3.sk"] = 
c.SnapShotClone.S3Config.SK - s3MapData["s3.endpoint"] = c.SnapShotClone.S3Config.NosAddress - s3MapData["s3.bucket_name"] = c.SnapShotClone.S3Config.SnapShotBucketName - // reserved for backward compatible - s3MapData["s3.nos_address"] = c.SnapShotClone.S3Config.NosAddress - s3MapData["s3.snapshot_bucket_name"] = c.SnapShotClone.S3Config.SnapShotBucketName - - var configMapData string - for k, v := range s3MapData { - configMapData = configMapData + k + "=" + v + "\n" - } - - return configMapData, nil -} - -func translateS3StringToMap(data string) map[string]string { - lines := strings.Split(data, "\n") - config := make(map[string]string) - - for _, line := range lines { - line = strings.TrimSpace(line) - if strings.HasPrefix(line, "#") || line == "" { // skip the comment lines and blank lines - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - continue // ignore invalid line - } - key := strings.TrimSpace(parts[0]) - value := strings.TrimSpace(parts[1]) - config[key] = value - } - return config - -} diff --git a/pkg/chunkserver/volume.go b/pkg/chunkserver/volume.go deleted file mode 100644 index 5c6c1adc..00000000 --- a/pkg/chunkserver/volume.go +++ /dev/null @@ -1,220 +0,0 @@ -package chunkserver - -import ( - "fmt" - "path" - "strings" - - v1 "k8s.io/api/core/v1" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/config" -) - -const ( - chunkserverVolumeName = "chunkserver-data" -) - -// createFormatVolumeAndMount -func (c *Cluster) createFormatVolumeAndMount(device curvev1.DevicesSpec) ([]v1.Volume, []v1.VolumeMount) { - vols := []v1.Volume{} - mounts := []v1.VolumeMount{} - - mode := int32(0644) - formatCMVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: config.ChunkserverAllConfigMapName, - }, - Items: []v1.KeyToPath{ - { - Key: formatScriptFileDataKey, - Path: formatScriptFileDataKey, - Mode: &mode, - }, - }, - } - configVol := 
v1.Volume{ - Name: fmt.Sprint(config.ChunkserverAllConfigMapName, "-tool"), - VolumeSource: v1.VolumeSource{ - ConfigMap: formatCMVolSource, - }, - } - - // configmap volume mount path - formatCMVolumeMount := v1.VolumeMount{ - Name: fmt.Sprint(config.ChunkserverAllConfigMapName, "-tool"), - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: formatScriptMountPath, - SubPath: formatScriptFileDataKey, - } - vols = append(vols, configVol) - mounts = append(mounts, formatCMVolumeMount) - - // create hostpath volume and volume mount for device.MountPath - hostPathType := v1.HostPathDirectoryOrCreate - volumeName := strings.TrimSpace(device.MountPath) - volumeName = strings.TrimRight(volumeName, "/") - volumeNameArr := strings.Split(volumeName, "/") - volumeName = volumeNameArr[len(volumeNameArr)-1] - // volume name : chunkserver-data-chunkserver0 - tmpVolumeName := chunkserverVolumeName + "-" + volumeName - - src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: device.MountPath, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: tmpVolumeName, VolumeSource: src}) - mounts = append(mounts, v1.VolumeMount{Name: tmpVolumeName, MountPath: ChunkserverContainerDataDir}) - - // Create hostpath volume and volume mount for '/dev' - src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}} - vols = append(vols, v1.Volume{Name: "devices", VolumeSource: src}) - mounts = append(mounts, v1.VolumeMount{Name: "devices", MountPath: "/dev"}) - - return vols, mounts -} - -// CSDaemonVolumes DaemonVolumes returns the pod volumes used only by chunkserver -func CSDaemonVolumes(csConfig *chunkserverConfig) []v1.Volume { - vols := []v1.Volume{} - - // create configmap volume - configMapVolumes, _ := CSConfigConfigMapVolumeAndMount(csConfig) - vols = append(vols, configMapVolumes...) 
- - // create hostpath volume for '/dev' - src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/dev"}} - vols = append(vols, v1.Volume{Name: "dev-volume", VolumeSource: src}) - - // create logs volume for - hostPathType := v1.HostPathDirectoryOrCreate - src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: csConfig.DataPathMap.HostLogDir, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: "log-volume", VolumeSource: src}) - - return vols -} - -// CSDaemonVolumeMounts returns the pod container volume mounth used only by chunkserver -func CSDaemonVolumeMounts(csConfig *chunkserverConfig) []v1.VolumeMount { - mounts := []v1.VolumeMount{} - - // create configmap mount path - _, configMapMounts := CSConfigConfigMapVolumeAndMount(csConfig) - mounts = append(mounts, configMapMounts...) - - // create data mount path and log mount path on container - mounts = append(mounts, v1.VolumeMount{Name: "dev-volume", MountPath: "/dev"}) - mounts = append(mounts, v1.VolumeMount{Name: "log-volume", MountPath: csConfig.DataPathMap.ContainerLogDir}) - - return mounts -} - -// CSConfigConfigMapVolumeAndMount creates configmap volume and volume mount for daemon chunkserver pod -func CSConfigConfigMapVolumeAndMount(csConfig *chunkserverConfig) ([]v1.Volume, []v1.VolumeMount) { - vols := []v1.Volume{} - mounts := []v1.VolumeMount{} - - // mount cs_client.conf and s3.conf - mode := int32(0644) - CSClientVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: config.ChunkserverAllConfigMapName, - }, - Items: []v1.KeyToPath{ - {Key: config.CSClientConfigMapDataKey, Path: config.CSClientConfigMapDataKey, Mode: &mode}, - // {Key: config.S3ConfigMapDataKey, Path: config.S3ConfigMapDataKey, Mode: &mode}, - }, - } - - csClientVols := v1.Volume{ - Name: "cs-client-conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: CSClientVolSource, - }, - } - vols = append(vols, csClientVols) - - m := v1.VolumeMount{ - Name: "cs-client-conf", 
- ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.ChunkserverConfigMapMountPathDir, config.CSClientConfigMapDataKey), - SubPath: config.CSClientConfigMapDataKey, - } - mounts = append(mounts, m) - - // s3.conf - s3VolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: config.SnapShotCloneAllConfigMapName, - }, - Items: []v1.KeyToPath{ - {Key: config.S3ConfigMapDataKey, Path: config.S3ConfigMapDataKey, Mode: &mode}, - }, - } - - s3Vols := v1.Volume{ - Name: "s3-conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: s3VolSource, - }, - } - vols = append(vols, s3Vols) - - m = v1.VolumeMount{ - Name: "s3-conf", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.ChunkserverConfigMapMountPathDir, config.S3ConfigMapDataKey), - SubPath: config.S3ConfigMapDataKey, - } - mounts = append(mounts, m) - - // mount start_chunkserver.sh - scriptVolSrouce := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: config.ChunkserverAllConfigMapName, - }, - Items: []v1.KeyToPath{ - {Key: startChunkserverScriptFileDataKey, Path: startChunkserverScriptFileDataKey, Mode: &mode}, - }, - } - - scriptVols := v1.Volume{ - Name: "start-server-volume", - VolumeSource: v1.VolumeSource{ - ConfigMap: scriptVolSrouce, - }, - } - vols = append(vols, scriptVols) - - m = v1.VolumeMount{ - Name: "start-server-volume", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: startChunkserverMountPath, - SubPath: startChunkserverScriptFileDataKey, - } - mounts = append(mounts, m) - - // mount chunkserver.conf - configMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: csConfig.CurrentConfigMapName, - }, - Items: []v1.KeyToPath{ - {Key: config.ChunkserverConfigMapDataKey, Path: 
config.ChunkserverConfigMapDataKey, Mode: &mode}, - }, - } - configVol := v1.Volume{ - Name: csConfig.CurrentConfigMapName, - VolumeSource: v1.VolumeSource{ - ConfigMap: configMapVolSource, - }, - } - vols = append(vols, configVol) - - m = v1.VolumeMount{ - Name: csConfig.CurrentConfigMapName, - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.ChunkserverConfigMapMountPathDir, config.ChunkserverConfigMapDataKey), - SubPath: config.ChunkserverConfigMapDataKey, - } - mounts = append(mounts, m) - - return vols, mounts -} diff --git a/pkg/clusterd/bscluster.go b/pkg/clusterd/bscluster.go new file mode 100644 index 00000000..b2aa9a8e --- /dev/null +++ b/pkg/clusterd/bscluster.go @@ -0,0 +1,111 @@ +package clusterd + +import ( + "github.com/go-logr/logr" + curvev1 "github.com/opencurve/curve-operator/api/v1" +) + +var _ Clusterer = &BsClusterManager{} + +type BsClusterManager struct { + Context Context + Cluster *curvev1.CurveCluster + Logger logr.Logger + + UUID string + Kind string + OwnerInfo *OwnerInfo +} + +func (c *BsClusterManager) GetContext() Context { return c.Context } +func (c *BsClusterManager) GetName() string { return c.Cluster.Name } +func (c *BsClusterManager) GetNameSpace() string { return c.Cluster.Namespace } +func (c *BsClusterManager) GetUUID() string { return c.UUID } +func (c *BsClusterManager) GetKind() string { return c.Kind } +func (c *BsClusterManager) GetOwnerInfo() *OwnerInfo { return c.OwnerInfo } +func (c *BsClusterManager) GetNodes() []string { return c.Cluster.Spec.Nodes } +func (c *BsClusterManager) GetDataDir() string { return c.Cluster.Spec.DataDir } +func (c *BsClusterManager) GetLogDir() string { return c.Cluster.Spec.LogDir } +func (c *BsClusterManager) GetContainerImage() string { return c.Cluster.Spec.CurveVersion.Image } +func (c *BsClusterManager) GetCopysets() int { return *c.Cluster.Spec.Copysets } +func (c *BsClusterManager) GetEtcdSpec() *curvev1.EtcdSpec { 
return c.Cluster.Spec.Etcd } +func (c *BsClusterManager) GetMdsSpec() *curvev1.MdsSpec { return c.Cluster.Spec.Mds } +func (c *BsClusterManager) GetChunkserverSpec() *curvev1.StorageScopeSpec { + return c.Cluster.Spec.Chunkserver +} +func (c *BsClusterManager) GetMetaserverSpec() *curvev1.MetaServerSpec { return nil } +func (c *BsClusterManager) GetSnapShotSpec() *curvev1.SnapShotCloneSpec { + return c.Cluster.Spec.SnapShotClone +} +func (c *BsClusterManager) GetRoleInstances(role string) int { + switch role { + case ROLE_ETCD, ROLE_MDS: + if len(c.GetNodes()) == 1 { // stand alone + return 3 + } + return 1 + case ROLE_CHUNKSERVER: + return c.Cluster.Spec.Chunkserver.Instances + } + return 0 +} + +func (c *BsClusterManager) GetRolePort(role string) int { + switch role { + case ROLE_ETCD: + return *c.Cluster.Spec.Etcd.PeerPort + case ROLE_MDS: + return *c.Cluster.Spec.Mds.Port + case ROLE_CHUNKSERVER: + return *c.Cluster.Spec.Chunkserver.Port + default: + return 0 + } +} + +func (c *BsClusterManager) GetRoleClientPort(role string) int { + switch role { + case ROLE_ETCD: + return *c.Cluster.Spec.Etcd.ClientPort + default: + return 0 + } +} + +func (c *BsClusterManager) GetRoleDummyPort(role string) int { + switch role { + case ROLE_MDS: + return *c.Cluster.Spec.Mds.DummyPort + case ROLE_SNAPSHOTCLONE: + return *c.Cluster.Spec.SnapShotClone.DummyPort + default: + return 0 + } +} + +func (c *BsClusterManager) GetRoleProxyPort(role string) int { + switch role { + case ROLE_SNAPSHOTCLONE: + return *c.Cluster.Spec.SnapShotClone.ProxyPort + } + return 0 +} + +func (c *BsClusterManager) GetRoleExternalPort(role string) int { + return 0 +} + +func (c *BsClusterManager) GetRoleConfigs(role string) map[string]string { + switch role { + case ROLE_ETCD: + return c.Cluster.Spec.Etcd.Config + case ROLE_MDS: + return c.Cluster.Spec.Mds.Config + case ROLE_METASERVER: + return c.Cluster.Spec.Chunkserver.Config + case ROLE_SNAPSHOTCLONE: + return c.Cluster.Spec.SnapShotClone.Config + 
default: + return nil + } +} diff --git a/pkg/clusterd/cluster.go b/pkg/clusterd/cluster.go new file mode 100644 index 00000000..c8a3585a --- /dev/null +++ b/pkg/clusterd/cluster.go @@ -0,0 +1,34 @@ +package clusterd + +import ( + curvev1 "github.com/opencurve/curve-operator/api/v1" +) + +type Clusterer interface { + GetContext() Context + + GetName() string + GetNameSpace() string + GetUUID() string + GetKind() string + GetOwnerInfo() *OwnerInfo + + GetContainerImage() string + GetNodes() []string + GetDataDir() string + GetLogDir() string + GetCopysets() int + GetEtcdSpec() *curvev1.EtcdSpec + GetMdsSpec() *curvev1.MdsSpec + GetChunkserverSpec() *curvev1.StorageScopeSpec + GetMetaserverSpec() *curvev1.MetaServerSpec + GetSnapShotSpec() *curvev1.SnapShotCloneSpec + + GetRoleInstances(role string) int + GetRolePort(role string) int + GetRoleClientPort(role string) int + GetRoleDummyPort(role string) int + GetRoleProxyPort(role string) int + GetRoleExternalPort(role string) int + GetRoleConfigs(role string) map[string]string +} diff --git a/pkg/clusterd/fscluster.go b/pkg/clusterd/fscluster.go new file mode 100644 index 00000000..f76a10af --- /dev/null +++ b/pkg/clusterd/fscluster.go @@ -0,0 +1,106 @@ +package clusterd + +import ( + "github.com/go-logr/logr" + curvev1 "github.com/opencurve/curve-operator/api/v1" +) + +var _ Clusterer = &FsClusterManager{} + +type FsClusterManager struct { + Context Context + Cluster *curvev1.Curvefs + Logger logr.Logger + + UUID string + Kind string + OwnerInfo *OwnerInfo +} + +func (c *FsClusterManager) GetContext() Context { return c.Context } +func (c *FsClusterManager) GetName() string { return c.Cluster.Name } +func (c *FsClusterManager) GetNameSpace() string { return c.Cluster.Namespace } +func (c *FsClusterManager) GetUUID() string { return c.UUID } +func (c *FsClusterManager) GetKind() string { return c.Kind } +func (c *FsClusterManager) GetOwnerInfo() *OwnerInfo { return c.OwnerInfo } +func (c *FsClusterManager) GetNodes() 
[]string { return c.Cluster.Spec.Nodes } +func (c *FsClusterManager) GetDataDir() string { return c.Cluster.Spec.DataDir } +func (c *FsClusterManager) GetLogDir() string { return c.Cluster.Spec.LogDir } +func (c *FsClusterManager) GetContainerImage() string { return c.Cluster.Spec.CurveVersion.Image } +func (c *FsClusterManager) GetCopysets() int { return *c.Cluster.Spec.Copysets } +func (c *FsClusterManager) GetEtcdSpec() *curvev1.EtcdSpec { return c.Cluster.Spec.Etcd } +func (c *FsClusterManager) GetMdsSpec() *curvev1.MdsSpec { return c.Cluster.Spec.Mds } +func (c *FsClusterManager) GetChunkserverSpec() *curvev1.StorageScopeSpec { return nil } +func (c *FsClusterManager) GetMetaserverSpec() *curvev1.MetaServerSpec { + return c.Cluster.Spec.MetaServer +} +func (c *FsClusterManager) GetSnapShotSpec() *curvev1.SnapShotCloneSpec { return nil } +func (c *FsClusterManager) GetRoleInstances(role string) int { + switch role { + case ROLE_ETCD, ROLE_MDS: + if len(c.GetNodes()) == 1 { // stand alone + return 3 + } + case ROLE_METASERVER: + return c.Cluster.Spec.MetaServer.Instances + } + + return 1 +} + +func (c *FsClusterManager) GetRolePort(role string) int { + switch role { + case ROLE_ETCD: + return *c.Cluster.Spec.Etcd.PeerPort + case ROLE_MDS: + return *c.Cluster.Spec.Mds.Port + case ROLE_METASERVER: + return *c.Cluster.Spec.MetaServer.Port + default: + return 0 + } +} + +func (c *FsClusterManager) GetRoleClientPort(role string) int { + switch role { + case ROLE_ETCD: + return *c.Cluster.Spec.Etcd.ClientPort + default: + return 0 + } +} + +func (c *FsClusterManager) GetRoleDummyPort(role string) int { + switch role { + case ROLE_MDS: + return *c.Cluster.Spec.Mds.DummyPort + default: + return 0 + } +} + +func (c *FsClusterManager) GetRoleProxyPort(role string) int { + return 0 +} + +func (c *FsClusterManager) GetRoleExternalPort(role string) int { + switch role { + case ROLE_METASERVER: + return *c.Cluster.Spec.MetaServer.ExternalPort + default: + return 0 + } +} + 
+func (c *FsClusterManager) GetRoleConfigs(role string) map[string]string { + switch role { + case ROLE_ETCD: + return c.Cluster.Spec.Etcd.Config + case ROLE_MDS: + return c.Cluster.Spec.Mds.Config + case ROLE_METASERVER: + return c.Cluster.Spec.MetaServer.Config + default: + return nil + } +} diff --git a/pkg/k8sutil/resources.go b/pkg/clusterd/owner_reference.go similarity index 79% rename from pkg/k8sutil/resources.go rename to pkg/clusterd/owner_reference.go index 620fe91e..3b619d39 100644 --- a/pkg/k8sutil/resources.go +++ b/pkg/clusterd/owner_reference.go @@ -1,6 +1,5 @@ -package k8sutil +package clusterd -// MergeResourceRequirements merges two resource requirements together (first overrides second values) import ( "encoding/json" "fmt" @@ -26,7 +25,7 @@ func NewOwnerInfo(owner metav1.Object, scheme *runtime.Scheme) *OwnerInfo { return &OwnerInfo{owner: owner, scheme: scheme} } -// NewOwnerInfoWithOwnerRef create a new ownerInfo to set ownerReference by rook itself +// NewOwnerInfoWithOwnerRef create a new ownerInfo to set ownerReference by itself func NewOwnerInfoWithOwnerRef(ownerRef *metav1.OwnerReference, namespace string) *OwnerInfo { return &OwnerInfo{ownerRef: ownerRef, ownerRefNamespace: namespace} } @@ -56,44 +55,6 @@ func (info *OwnerInfo) validateController(object metav1.Object) error { return nil } -// // SetOwnerReference set the owner reference of object -// func (info *OwnerInfo) SetOwnerReference(object metav1.Object) error { -// if info.owner != nil { -// return controllerutil.SetOwnerReference(info.owner, object, info.scheme) -// } -// if info.ownerRef == nil { -// return nil -// } -// err := info.validateOwner(object) -// if err != nil { -// return err -// } -// ownerRefs := object.GetOwnerReferences() -// for _, v := range ownerRefs { -// if referSameObject(v, *info.ownerRef) { -// return nil -// } -// } -// ownerRefs = append(ownerRefs, *info.ownerRef) -// object.SetOwnerReferences(ownerRefs) -// return nil -// } - -// // The original 
code is in https://github.com/kubernetes-sigs/controller-runtime/blob/a905949b9040084f0c6d2a27ec70e77c3c5c0931/pkg/controller/controllerutil/controllerutil.go#L160 -// func referSameObject(a, b metav1.OwnerReference) bool { -// groupVersionA, err := schema.ParseGroupVersion(a.APIVersion) -// if err != nil { -// return false -// } - -// groupVersionB, err := schema.ParseGroupVersion(b.APIVersion) -// if err != nil { -// return false -// } - -// return groupVersionA.Group == groupVersionB.Group && a.Kind == b.Kind && a.Name == b.Name -// } - // SetControllerReference set the controller reference of object func (info *OwnerInfo) SetControllerReference(object metav1.Object) error { if info.owner != nil { diff --git a/pkg/clusterd/util.go b/pkg/clusterd/util.go new file mode 100644 index 00000000..27a0a698 --- /dev/null +++ b/pkg/clusterd/util.go @@ -0,0 +1,14 @@ +package clusterd + +const ( + KIND_CURVEBS = "curvebs" + KIND_CURVEFS = "curvefs" +) + +const ( + ROLE_ETCD = "etcd" + ROLE_MDS = "mds" + ROLE_CHUNKSERVER = "chunkserver" + ROLE_SNAPSHOTCLONE = "snapshotclone" + ROLE_METASERVER = "metaserver" +) diff --git a/pkg/config/config.go b/pkg/config/config.go deleted file mode 100644 index 5950d42c..00000000 --- a/pkg/config/config.go +++ /dev/null @@ -1,117 +0,0 @@ -package config - -import "github.com/coreos/pkg/capnslog" - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "config") - -const ( - KIND_CURVEBS = "curvebs" - KIND_CURVEFS = "curvefs" -) - -const ( - ROLE_ETCD = "etcd" - ROLE_MDS = "mds" - ROLE_SNAPSHOTCLONE = "snapshotclone" - ROLE_METASERVER = "metaserver" - ROLE_CHUNKSERVER = "chunkserver" -) - -const ( - // template configmap - DefaultConfigMapName = "curve-conf-default" - - // all chunkserver config map - ChunkserverAllConfigMapName = "chunkserver-all-config" - SnapShotCloneAllConfigMapName = "snapshotclone-all-config" - - // configmap to record the endpoints of etcd - EtcdOverrideConfigMapName = 
"etcd-endpoints-override" - EtcdOvverideConfigMapDataKey = "etcdEndpoints" - ClusterEtcdAddr = "clusterEtcdAddr" - - // configmap to record the endpoints of mds - MdsOverrideConfigMapName = "mds-endpoints-override" - MdsOvverideConfigMapDataKey = "mdsEndpoints" - ClusterMdsDummyAddr = "clusterMdsDummyAddr" - ClusterMdsDummyPort = "clusterMdsDummyPort" - - // configuration - // etcd.conf - EtcdConfigMapDataKey = "etcd.conf" - EtcdConfigMapMountPathDir = "/curvebs/etcd/conf" - FSEtcdConfigMapMountPathDir = "/curvefs/etcd/conf" - FSMdsConfigMapMountPathDir = "/curvefs/mds/conf" - - // mds.conf - MdsConfigMapDataKey = "mds.conf" - MdsConfigMapMountPathDir = "/curvebs/mds/conf" - - // chunkserver.conf - ChunkserverConfigMapDataKey = "chunkserver.conf" - ChunkserverConfigMapMountPathDir = "/curvebs/chunkserver/conf" - - // cs_client.conf - CSClientConfigMapDataKey = "cs_client.conf" - CSClientConfigMapMountPathDir = "/curvebs/chunkserver/conf" - - // s3.conf - S3ConfigMapDataKey = "s3.conf" - S3ConfigMapMountPathDir = "/curvebs/chunkserver/conf" - S3ConfigMapMountSnapPathDir = "/curvebs/snapshotclone/conf" - - // topology.json - TopoJsonConfigMapName = "topology-json-conf" - TopoJsonConfigmapDataKey = "topology.json" - TopoJsonConfigmapMountPathDir = "/curvebs/tools/conf" - FSTopoJsonConfigmapMountPathDir = "/curvefs/tools/conf" - - // tools.conf - ToolsConfigMapName = "tools-conf" - ToolsConfigMapDataKey = "tools.conf" - ToolsConfigMapMountPathDir = "/etc/curve" - FSToolsConfigMapMountPathDir = "/etc/curvefs" - - // snap_client.conf - SnapClientConfigMapDataKey = "snap_client.conf" - SnapClientConfigMapMountPath = "/curvebs/snapshotclone/conf" - - // snapshotclone.conf - SnapShotCloneConfigMapDataKey = "snapshotclone.conf" - SnapShotCloneConfigMapMountPath = "/curvebs/snapshotclone/conf" - - // nginx.conf - NginxConfigMapDataKey = "nginx.conf" - NginxConfigMapMountPath = "/curvebs/snapshotclone/conf" - - // start nginx.conf - StartSnapConfigMapDataKey = "start_snap.sh" 
- StartSnapConfigMapMountPath = "/curvebs/tools/sbin/start_snap.sh" - - // metaserver.conf - MetaServerConfigMapDataKey = "metaserver.conf" - MetaServerConfigMapMountPath = "/curvefs/metaserver/conf" - - // prometheus.yaml - PrometheusConfigMapName = "prometheus-conf" - PrometheusConfigMapDataKey = "prometheus.yml" - - // grafana datasource yaml - GrafanaDataSourcesConfigMapName = "grafana-conf" - GrafanaDataSourcesConfigMapDataKey = "all.yml" - GrafanaDataSourcesConfigMapMountPath = "/etc/grafana/provisioning/datasources" - - // grafana dashboards - GrafanaDashboardsMountPath = "/etc/grafana/provisioning/dashboards" - - // grafana INI config - GrafanaINIConfigMapDataKey = "grafana.ini" - GrafanaINIConfigMountPath = "/etc/grafana" - - // report.sh - ReportConfigMapName = "report-conf" - ReportConfigMapDataKey = "report.sh" - ReportConfigMapMountPathCommon = "tools/sbin/report" // a new path -) - -const GrafanaDashboardsTemp = "grafana-dashboard-temp" diff --git a/pkg/config/datapath.go b/pkg/config/datapath.go deleted file mode 100644 index 78790fde..00000000 --- a/pkg/config/datapath.go +++ /dev/null @@ -1,31 +0,0 @@ -package config - -// A DataPathMap is a struct which contains information about where Curve daemon data is stored in -// containers and whether the data should be persisted to the host. If it is persisted to the host, -// directory on the host where the specific daemon's data is stored is given. -type DataPathMap struct { - // HostDataDir should be set to the path on the host where the specific daemon's data is stored. - HostDataDir string - - // HostLogDir should be set to the path on the host where the specific daemon's log is stored. - HostLogDir string - - // ContainerDataDir should be set to the path in the container - // where the specific daemon's data is stored. - ContainerDataDir string - - // ContainerDataDir should be set to the path in the container - // where the specific daemon's log is stored. 
- ContainerLogDir string -} - -// NewDaemonDataPathMap returns a new DataPathMap for a daemon which does not utilize a data -// dir in the container as the mon, mgr, osd, mds, and rgw daemons do. -func NewDaemonDataPathMap(hostDataDir string, hostLogDir string, containerDataDir string, containerLogDir string) *DataPathMap { - return &DataPathMap{ - HostDataDir: hostDataDir, - HostLogDir: hostLogDir, - ContainerDataDir: containerDataDir, - ContainerLogDir: containerLogDir, - } -} diff --git a/pkg/config/variables.go b/pkg/config/variables.go deleted file mode 100644 index 92aefbc3..00000000 --- a/pkg/config/variables.go +++ /dev/null @@ -1,158 +0,0 @@ -package config - -import ( - "regexp" - - "github.com/pkg/errors" -) - -/* - * built-in variables: - * - * service: - * ${prefix} "/curvebs/{etcd,mds,chunkserver}" - * ${service_id} "c690bde11d1a" - * ${service_role} "mds" - * ${service_host} "10.0.0.1" - * ${service_host_sequence} "1" - * ${service_replicas_sequence} "1" - * ${format_replicas_sequence} "01" - * ${service_addr} "10.0.0.1" - * ${service_port} "6666" - * ${service_client_port} "2379" (etcd) - * ${service_dummy_port} "6667" (snapshotclone/mds) - * ${service_proxy_port} "8080" (snapshotclone) - * ${service_external_addr} "10.0.10.1" (chunkserver/metaserver) - * ${service_external_port} "7800" (metaserver) - * ${log_dir} "/data/logs" - * ${data_dir} "/data" - * ${random_uuid} "6fa8f01c411d7655d0354125c36847bb" - * - * cluster: - * ${cluster_etcd_http_addr} "etcd1=http://10.0.10.1:2380,etcd2=http://10.0.10.2:2380,etcd3=http://10.0.10.3:2380" - * ${cluster_etcd_addr} "10.0.10.1:2380,10.0.10.2:2380,10.0.10.3:2380" - * ${cluster_mds_addr} "10.0.10.1:6666,10.0.10.2:6666,10.0.10.3:6666" - * ${cluster_mds_dummy_addr} "10.0.10.1:6667,10.0.10.2:6667,10.0.10.3:6667" - * ${cluster_mds_dummy_port} "6667,6668,6669" - * ${cluster_chunkserver_addr} "10.0.10.1:6800,10.0.10.2:6800,10.0.10.3:6800" - * ${cluster_snapshotclone_addr} 
"10.0.10.1:5555,10.0.10.2:5555,10.0.10.3:5555" - * ${cluster_snapshotclone_proxy_addr} "10.0.10.1:8080,10.0.10.2:8080,10.0.10.3:8083" - * ${cluster_snapshotclone_dummy_port} "8081,8082,8083" - * ${cluster_snapshotclone_nginx_upstream} "server 10.0.0.1:5555; server 10.0.0.3:5555; server 10.0.0.3:5555;" - * ${cluster_metaserver_addr} "10.0.10.1:6701,10.0.10.2:6701,10.0.10.3:6701" - */ - -const ( - REGEX_VARIABLE = `\${([^${}]+)}` // ${var_name} -) - -type ConfigInterface interface { - GetPrefix() string - GetServiceId() string - GetServiceRole() string - GetServiceHost() string - GetServiceHostSequence() string - GetServiceReplicaSequence() string - GetServiceReplicasSequence() string - GetServiceAddr() string - GetServicePort() string - GetServiceClientPort() string - GetServiceDummyPort() string - GetServiceProxyPort() string - GetServiceExternalAddr() string - GetServiceExternalPort() string - GetLogDir() string - GetDataDir() string - // cluster - GetClusterEtcdHttpAddr() string - GetClusterEtcdAddr() string - GetClusterMdsAddr() string - GetClusterMdsDummyAddr() string - GetClusterMdsDummyPort() string - GetClusterChunkserverAddr() string - GetClusterMetaserverAddr() string - GetClusterSnapshotcloneAddr() string - GetClusterSnapshotcloneProxyAddr() string - GetClusterSnapshotcloneDummyPort() string - GetClusterSnapshotcloneNginxUpstream() string -} - -func getValue(name string, dc ConfigInterface) string { - switch name { - case "prefix": - return dc.GetPrefix() - case "service_id": - return dc.GetServiceId() - case "service_role": - return dc.GetServiceRole() - case "service_host": - return dc.GetServiceHost() - case "service_host_sequence": - return dc.GetServiceHostSequence() - case "service_replica_sequence": - return dc.GetServiceReplicaSequence() - case "service_replicas_sequence": - return dc.GetServiceReplicasSequence() - case "service_addr": - return dc.GetServiceAddr() - case "service_port": - return dc.GetServicePort() - case "service_client_port": // 
etcd - return dc.GetServiceClientPort() - case "service_dummy_port": // mds, snapshotclone - return dc.GetServiceDummyPort() - case "service_proxy_port": // snapshotclone - return dc.GetServiceProxyPort() - case "service_external_addr": // chunkserver, metaserver - return dc.GetServiceExternalAddr() - case "service_external_port": // metaserver - return dc.GetServiceExternalPort() - case "log_dir": - return dc.GetLogDir() - case "data_dir": - return dc.GetDataDir() - case "cluster_etcd_http_addr": - return dc.GetClusterEtcdHttpAddr() - case "cluster_etcd_addr": - return dc.GetClusterEtcdAddr() - case "cluster_mds_addr": - return dc.GetClusterMdsAddr() - case "cluster_mds_dummy_addr": - return dc.GetClusterMdsDummyAddr() - case "cluster_mds_dummy_port": - return dc.GetClusterMdsDummyPort() - case "cluster_chunkserver_addr": - return dc.GetClusterChunkserverAddr() - case "cluster_metaserver_addr": - return dc.GetClusterMetaserverAddr() - case "cluster_snapshotclone_addr": - return dc.GetClusterSnapshotcloneAddr() - case "cluster_snapshotclone_proxy_addr": - return dc.GetClusterSnapshotcloneProxyAddr() - case "cluster_snapshotclone_dummy_port": - return dc.GetClusterSnapshotcloneDummyPort() - case "cluster_snapshotclone_nginx_upstream": - return dc.GetClusterSnapshotcloneNginxUpstream() - } - - return "" -} - -// ReplaceConfigVars replaces vars in config string -func ReplaceConfigVars(confStr string, c ConfigInterface) (string, error) { - r, err := regexp.Compile(REGEX_VARIABLE) - if err != nil { - return "", err - } - - matches := r.ReplaceAllStringFunc(confStr, func(keyName string) string { - return getValue(keyName[2:len(keyName)-1], c) - }) - - if len(matches) == 0 { - logger.Error("No matches for regexp") - return "", errors.Wrap(err, "No matches for regexp") - } - - return matches, nil -} diff --git a/pkg/controllers/bs_controller.go b/pkg/controllers/bs_controller.go new file mode 100644 index 00000000..8618d85e --- /dev/null +++ 
b/pkg/controllers/bs_controller.go @@ -0,0 +1,362 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + curvev1 "github.com/opencurve/curve-operator/api/v1" + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/service" + "github.com/opencurve/curve-operator/pkg/topology" +) + +// CurveClusterReconciler reconciles a CurveCluster object +type CurveClusterReconciler struct { + Client client.Client + Log logr.Logger + Scheme *runtime.Scheme + + context clusterd.Context + clusterMap map[string]*clusterd.BsClusterManager +} + +func NewCurveClusterReconciler( + client client.Client, + log logr.Logger, + scheme *runtime.Scheme, + context clusterd.Context, +) *CurveClusterReconciler { + return &CurveClusterReconciler{ + Client: client, + Log: log, + Scheme: scheme, + + context: context, + clusterMap: make(map[string]*clusterd.BsClusterManager), + } +} + +// +kubebuilder:rbac:groups=operator.curve.io,resources=curveclusters,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=operator.curve.io,resources=curveclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create;update;get;list;watch;delete +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete + +func (r *CurveClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("Curve BS cluster", req.NamespacedName) + log.Info("reconcileing CurveCluster") + + r.context.Client = r.Client + ctx := context.Background() + + // Fetch the curveCluster instance + var curveCluster *curvev1.CurveCluster + if err := r.Client.Get(ctx, req.NamespacedName, curveCluster); err != nil { + logger.Error(err, "curvefs resource not found. 
Ignoring since object must be deleted.") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Set a finalizer so we can do cleanup before the object goes away + if err := k8sutil.AddFinalizerIfNotPresent(ctx, r.Client, curveCluster); err != nil { + return reconcile.Result{}, err + } + + // Delete: the CR was deleted + if !curveCluster.GetDeletionTimestamp().IsZero() { + return reconcile.Result{}, r.reconcileCurveBsDelete(curveCluster) + } + + ownerInfo := clusterd.NewOwnerInfo(curveCluster, r.Scheme) + return r.reconcileCurveCluster(curveCluster, ownerInfo) + + // k8sutil.UpdateCondition(context.TODO(), + // r.Client, + // clusterd.KIND_CURVEBS, + // req.NamespacedName, + // curvev1.ClusterRunning, + // curvev1.ClusterCondition{ + // Type: curvev1.ConditionClusterReady, + // Status: curvev1.ConditionStatusTrue, + // Reason: curvev1.ConditionReconcileSucceeded, + // Message: "create cluster successed", + // }, + // ) +} + +// reconcileDelete +func (r *CurveClusterReconciler) reconcileCurveBsDelete(clusterObj *curvev1.CurveCluster) error { + // get currnet cluster and delete it + cluster, ok := r.clusterMap[clusterObj.GetNamespace()] + if !ok { + logger.Errorf("failed to find the cluster %q", clusterObj.GetName()) + return errors.New("internal error") + } + + dcs, err := topology.ParseTopology(cluster) + if err != nil { + return err + } + err = service.StartClusterCleanUpJob(cluster, dcs) + if err != nil { + return err + } + + // delete it from clusterMap + if _, ok := r.clusterMap[cluster.GetNameSpace()]; ok { + delete(r.clusterMap, cluster.GetNameSpace()) + } + + // remove finalizers + k8sutil.RemoveFinalizer(context.Background(), + r.Client, + types.NamespacedName{Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()}, + clusterObj) + + logger.Infof("curve cluster %v has been deleted successed", clusterObj.GetName()) + + return nil +} + +// reconcileCurveCluster start reconcile a CurveBS cluster +func (r *CurveClusterReconciler) 
reconcileCurveCluster(clusterObj *curvev1.CurveCluster, ownerInfo *clusterd.OwnerInfo) (ctrl.Result, error) { + m, ok := r.clusterMap[clusterObj.Namespace] + if !ok { + newUUID := uuid.New().String() + m = newBsClusterManager(newUUID, clusterd.KIND_CURVEBS) + } + + // construct cluster object + m.Context = r.context + m.Cluster = clusterObj + m.Logger = r.Log + m.OwnerInfo = ownerInfo + + r.clusterMap[m.GetNameSpace()] = m + m.Logger.Info("reconcileing Curve BS Cluster in namespace %q", m.GetNameSpace()) + + dcs, err := topology.ParseTopology(m) + if err != nil { + return reconcile.Result{}, err + } + + switch m.Cluster.Status.Phase { + case "": + // Update the cluster status to 'Creating' + m.Logger.Info("Curvebs accepted by operator", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + // create a configmap to record previous config of yaml file + if err := createorUpdateRecordConfigMap(m); err != nil { + m.Logger.Error(err, "failed to create or update previous ConfigMap") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + m.Cluster.Status.Phase = curvev1.ClusterCreating + m.Cluster.Status.CurveVersion = m.Cluster.Spec.CurveVersion + // m.Cluster.Status.StorageDir.DataDir = m.Cluster.Spec.DataDir + // m.Cluster.Status.StorageDir.LogDir = m.Cluster.Spec.LogDir + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "unable to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterCreating: + // Create a new cluster and update cluster status to 'Running' + initCluster(m, dcs) + m.Logger.Info("Curvefs accepted by operator", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "unable to update Curvefs") + return ctrl.Result{}, 
client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterRunning: + // Watch the update event and update cluster stauts to specfied 'status' + // Upgrading、Updating、Scaling + + // 1. check for upgrade + if m.Cluster.Spec.CurveVersion.Image != m.Cluster.Status.CurveVersion.Image { + m.Logger.Info("Check curvefs cluster image not match, need upgrade") + m.Cluster.Status.Phase = curvev1.ClusterUpgrading + m.Cluster.Status.CurveVersion = m.Cluster.Spec.CurveVersion + } + + // TODO: 2. compare DataDir and LogDir - not implement + // if m.Cluster.Spec.DataDir != m.Cluster.Status.StorageDir.DataDir || + // m.Cluster.Spec.LogDir != m.Cluster.Status.StorageDir.LogDir { + // m.Cluster.Status.Phase = curvev1.ClusterUpdating + // m.Cluster.Status.StorageDir.DataDir = m.Cluster.Spec.DataDir + // m.Cluster.Status.StorageDir.LogDir = m.Cluster.Spec.LogDir + // } + + // 3. compare etcd and mds and metaserver config + specParameters, _ := parseSpecParameters(m) + statusParameters, err := getDataFromRecordConfigMap(m) + if err != nil { + m.Logger.Error(err, "failed to read record config from record-configmap") + return ctrl.Result{}, nil + } + statusModified := false + for role, specRolePara := range specParameters { + roleParaVar := map[string]string{} + for specPK, specPV := range specRolePara { + paraStatusVal, paraExists := statusParameters[role][specPK] + if !paraExists || paraStatusVal != specPV { + roleParaVar[specPK] = specPV + statusModified = true + } + delete(statusParameters[role], specPK) + } + // delete some parameters + if len(statusParameters[role]) > 0 { + statusModified = true + } + m.Cluster.Status.LastModContextSet.ModContextSet = append(m.Cluster.Status.LastModContextSet.ModContextSet, curvev1.ModContext{ + Role: role, + Parameters: roleParaVar, + }) + } + if statusModified { + m.Cluster.Status.Phase = curvev1.ClusterUpdating + } + + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, 
"unable to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterUpdating: + // Update cluster and the target status is Running to watch other update events. + m.Logger.Info("Curvefs running to update", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + mcs := m.Cluster.Status.LastModContextSet.ModContextSet + if len(mcs) <= 0 { + m.Logger.Info("No Config need to update, ignore the event") + return ctrl.Result{}, nil + } + + roles2Modfing := map[string]bool{} + for _, ctx := range mcs { + roles2Modfing[ctx.Role] = true + } + // render fs-record-config ConfigMap again + if err := createorUpdateRecordConfigMap(m); err != nil { + m.Logger.Error(err, "failed to create or update previous ConfigMap") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 1. render After-Mutate-Config ConfigMap again + for role := range roles2Modfing { + for _, dc := range topology.FilterDeployConfigByRole(dcs, role) { + serviceConfigs := dc.GetProjectLayout().ServiceConfFiles + for _, conf := range serviceConfigs { + err := mutateConfig(m, dc, conf.Name) + if err != nil { + m.Logger.Error(err, "failed to render configmap again") + return ctrl.Result{}, err + } + } + } + + } + // 2. rebuild the Pods under the Deployment corresponding to the role, upgrade one by one. + // And wait for all Pods under the Deployment (only one) to be in the Ready state. 
+ for role := range roles2Modfing { + for _, dc := range topology.FilterDeployConfigByRole(dcs, role) { + if err := service.StartService(m, dc); err != nil { + m.Logger.Error(err, "failed to update Deployment Service") + return ctrl.Result{}, err + } + } + } + + m.Cluster.Status.Phase = curvev1.ClusterRunning + m.Cluster.Status.LastModContextSet.ModContextSet = nil + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterUpgrading: + // Upgrade cluster and the target status is Running to watch other update events. + m.Logger.Info("Curvefs running to update", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + for _, dc := range dcs { + if err := service.StartService(m, dc); err != nil { + m.Logger.Error(err, "failed to upgrade service ", dc.GetName()) + return ctrl.Result{}, err + } + } + + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterScaling: + // Perform the scale operation. + // The target status is Running, and continue to listen to other events. + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Client.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +func (r *CurveClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&curvev1.CurveCluster{}). 
+ Complete(r) +} diff --git a/pkg/controllers/cleanup.go b/pkg/controllers/cleanup.go deleted file mode 100644 index 9ed44ed1..00000000 --- a/pkg/controllers/cleanup.go +++ /dev/null @@ -1,203 +0,0 @@ -package controllers - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/pkg/errors" - batch "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - - "github.com/opencurve/curve-operator/pkg/chunkserver" - "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/etcd" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/mds" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - CleanupAppName = "curve-cleanup" - clusterCleanUpPolicyRetryInterval = 5 * time.Second - - dataVolumeName = "data-cleanup-volume" - dataDirHostPathEnv = "CURVE_DATA_DIR_HOST_PATH" -) - -// startClusterCleanUp start job to clean hostpath -func (c *ClusterController) startClusterCleanUp(ctx clusterd.Context, namespace string, nodesForJob []v1.Node) { - if len(nodesForJob) == 0 { - logger.Info("No nodes to cleanup") - return - } - - logger.Infof("starting clean up for cluster %q", namespace) - - err := c.waitForCurveDaemonCleanUp(context.TODO(), namespace, clusterCleanUpPolicyRetryInterval) - if err != nil { - logger.Errorf("failed to wait till curve daemons are destroyed. 
%v", err) - return - } - - c.startCleanUpJobs(namespace, nodesForJob) -} - -func (c *ClusterController) startCleanUpJobs(namespace string, nodesForJob []v1.Node) error { - for _, node := range nodesForJob { - logger.Infof("starting clean up job on node %q", node.Name) - jobName := k8sutil.TruncateNodeNameForJob("cluster-cleanup-job-%s", node.Name) - labels := getCleanupLabels("cleanup", c.namespacedName.Namespace) - podSpec := c.cleanUpJobTemplateSpec(c.clusterMap[namespace]) - podSpec.Spec.NodeName = node.Name - job := &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: namespace, - Labels: labels, - }, - Spec: batch.JobSpec{ - Template: podSpec, - }, - } - - if err := k8sutil.RunReplaceableJob(context.TODO(), c.context.Clientset, job, true); err != nil { - logger.Errorf("failed to run cluster clean up job on node %q. %v", node.Name, err) - return err - } - - logger.Infof("cleanup job %s has started", jobName) - } - return nil -} - -func (c *ClusterController) cleanUpJobTemplateSpec(cluster *daemon.Cluster) v1.PodTemplateSpec { - volumes := []v1.Volume{} - dataHostPathVolume := v1.Volume{Name: dataVolumeName, VolumeSource: v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: cluster.HostDataDir}}} - volumes = append(volumes, dataHostPathVolume) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: CleanupAppName, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.cleanUpJobContainer(cluster), - }, - Volumes: volumes, - RestartPolicy: v1.RestartPolicyOnFailure, - }, - } - - return podSpec -} - -func (c *ClusterController) cleanUpJobContainer(cluster *daemon.Cluster) v1.Container { - volumeMounts := []v1.VolumeMount{} - envVars := []v1.EnvVar{} - - dataHhostPathVolumeMount := v1.VolumeMount{Name: dataVolumeName, MountPath: cluster.HostDataDir} - volumeMounts = append(volumeMounts, dataHhostPathVolumeMount) - - securityContext := k8sutil.PrivilegedContext(true) - - envVars = append(envVars, []v1.EnvVar{ - 
{Name: dataDirHostPathEnv, Value: strings.TrimRight(cluster.HostDataDir, "/")}, - }...) - - commandLine := `rm -rf $(CURVE_DATA_DIR_HOST_PATH)/*;` - return v1.Container{ - Name: "host-cleanup", - Image: cluster.CurveVersion.Image, - ImagePullPolicy: cluster.CurveVersion.ImagePullPolicy, - Command: []string{ - "/bin/bash", - "-c", - }, - Args: []string{ - commandLine, - }, - Env: envVars, - VolumeMounts: volumeMounts, - SecurityContext: securityContext, - } -} - -func (c *ClusterController) waitForCurveDaemonCleanUp(context context.Context, namespace string, retryInterval time.Duration) error { - logger.Infof("waiting for all the curve daemons to be cleaned up in the cluster %q", namespace) - // 3 minutes(5s * 60) - maxRetryTime := 60 - retryCount := 0 - for { - retryCount++ - select { - case <-time.After(retryInterval): - curveHosts, err := c.getCurveNodes(namespace) - if err != nil { - return errors.Wrap(err, "failed to list curve daemon nodes") - } - - if len(curveHosts) == 0 { - logger.Info("all curve daemons are cleaned up") - return nil - } - - // always exit finally - if retryCount > maxRetryTime { - return errors.Errorf("cancelling the host cleanup job because of timeout") - } - - logger.Debugf("waiting for curve daemons in cluster %q to be cleaned up. Retrying in %q", - namespace, retryInterval.String()) - case <-context.Done(): - return errors.Errorf("cancelling the host cleanup job. 
%s", context.Err()) - } - } -} - -// getCurveNodes get all the node names where curve daemons are running -func (c *ClusterController) getCurveNodes(namespace string) ([]string, error) { - curveAppNames := []string{etcd.AppName, mds.AppName, chunkserver.AppName, chunkserver.PrepareJobName, topology.JOB_PYHSICAL_POOL, topology.JOB_LOGICAL_POOL, SyncConfigDeployment} - nodeNameList := sets.NewString() - hostNameList := []string{} - var b strings.Builder - - for _, app := range curveAppNames { - appLabelSelector := fmt.Sprintf("app=%s", app) - podList, err := c.context.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: appLabelSelector}) - if err != nil { - return hostNameList, errors.Wrapf(err, "could not list the %q pods", app) - } - - for _, curvePod := range podList.Items { - podNodeName := curvePod.Spec.NodeName - if podNodeName != "" && !nodeNameList.Has(podNodeName) { - nodeNameList.Insert(podNodeName) - } - } - fmt.Fprintf(&b, "%s: %d. ", app, len(podList.Items)) - } - - logger.Infof("existing curve daemons in the namespace %q. 
%s", namespace, b.String()) - - for nodeName := range nodeNameList { - podHostName, err := k8sutil.GetNodeHostName(context.TODO(), c.context.Clientset, nodeName) - if err != nil { - return nil, errors.Wrapf(err, "failed to get hostname from node %q", nodeName) - } - hostNameList = append(hostNameList, podHostName) - } - - return hostNameList, nil -} - -func getCleanupLabels(appName, namespace string) map[string]string { - labels := make(map[string]string) - labels["app"] = appName - labels["namespace"] = namespace - return labels -} diff --git a/pkg/controllers/client.go b/pkg/controllers/client.go deleted file mode 100644 index 94f080df..00000000 --- a/pkg/controllers/client.go +++ /dev/null @@ -1,158 +0,0 @@ -package controllers - -import ( - "bytes" - "context" - "fmt" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" - - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" -) - -const ( - SyncConfigDeployment = "curve-sync-config" -) - -// createSyncDeployment create a deployment for read config file -func createSyncDeployment(c *daemon.Cluster) error { - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: SyncConfigDeployment, - Labels: getReadConfigJobLabel(c), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - createSyncContainer(c), - }, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - NodeName: c.Nodes[0], - DNSPolicy: v1.DNSClusterFirstWithHostNet, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: SyncConfigDeployment, - Namespace: c.NamespacedName.Namespace, - Labels: getReadConfigJobLabel(c), - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: getReadConfigJobLabel(c), - }, - 
Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return err - } - var deploymentsToWaitFor []*apps.Deployment - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create deployment %s", SyncConfigDeployment) - } - logger.Infof("deployment for %s already exists. updating if needed", SyncConfigDeployment) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return err - } - } - - // update condition type and phase etc. 
- return nil -} - -func createSyncContainer(c *daemon.Cluster) v1.Container { - container := v1.Container{ - Name: "helper", - Command: []string{ - "/bin/bash", - }, - Args: []string{ - "-c", - "while true; do echo sync pod to read various config file from it; sleep 10;done", - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - return container -} - -func readConfigFromContainer(c *daemon.Cluster, pod v1.Pod, configPath string) (string, error) { - logger.Infof("syncing %v", configPath) - var ( - execOut bytes.Buffer - execErr bytes.Buffer - ) - req := c.Context.Clientset.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(pod.Name). - Namespace(pod.Namespace). - SubResource("exec") - req.VersionedParams(&v1.PodExecOptions{ - Container: pod.Spec.Containers[0].Name, - Command: []string{"cat", configPath}, - Stdout: true, - Stderr: true, - }, scheme.ParameterCodec) - - exec, err := remotecommand.NewSPDYExecutor(c.Context.KubeConfig, "POST", req.URL()) - if err != nil { - return "", fmt.Errorf("failed to init executor: %v", err) - } - - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: &execOut, - Stderr: &execErr, - Tty: false, - }) - - if err != nil { - return "", fmt.Errorf("could not execute: %v", err) - } - - if execErr.Len() > 0 { - return "", fmt.Errorf("stderr: %v", execErr.String()) - } - - cmdOutput := execOut.String() - return cmdOutput, nil -} - -func getReadConfigJobLabel(c *daemon.Cluster) map[string]string { - labels := make(map[string]string) - labels["app"] = SyncConfigDeployment - labels["curve"] = c.Kind - return labels -} diff --git a/pkg/controllers/cluster.go b/pkg/controllers/cluster.go index 1cc125c0..0cc7134b 100644 --- a/pkg/controllers/cluster.go +++ b/pkg/controllers/cluster.go @@ -1,176 +1,132 @@ package controllers import ( - "context" - "time" - "github.com/coreos/pkg/capnslog" - 
"github.com/opencurve/curve-operator/pkg/chunkserver" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/etcd" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/logrotate" - "github.com/opencurve/curve-operator/pkg/mds" - "github.com/opencurve/curve-operator/pkg/metaserver" - "github.com/opencurve/curve-operator/pkg/monitor" - "github.com/opencurve/curve-operator/pkg/snapshotclone" + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/service" "github.com/opencurve/curve-operator/pkg/topology" ) var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "controller") -func newCluster(uuid, kind string, isUpgrade bool) *daemon.Cluster { - return &daemon.Cluster{ - UUID: uuid, - Kind: kind, - IsUpgrade: isUpgrade, - } -} - -func reconcileSharedServer(c *daemon.Cluster) ([]daemon.NodeInfo, []*topology.DeployConfig, error) { - // get node name and internal ip mapping - nodesInfo, err := daemon.ConfigureNodeInfo(c) - if err != nil { - return nil, nil, err - } - - err = createSyncDeployment(c) - if err != nil { - return nil, nil, err - } - time.Sleep(20 * time.Second) - - err = createDefaultConfigMap(c) - if err != nil { - return nil, nil, err - } - - if c.Monitor.Enable { - err = createGrafanaConfigMapTemplate(c) - if err != nil { - return nil, nil, err - } - } - - logger.Info("create config template configmap successfully") - - err = createReportConfigMap(c) - if err != nil { - return nil, nil, err - } +const ( + REGEX_KV_SPLIT = "^(([^%s]+)%s\\s*)([^\\s#]*)" +) - err = logrotate.CreateLogrotateConfigMap(c) - if err != nil { - return nil, nil, err +func newFsClusterManager(uuid, kind string) *clusterd.FsClusterManager { + return &clusterd.FsClusterManager{ + UUID: uuid, + Kind: kind, } +} - // Start etcd cluster - etcds := etcd.New(c) - dcs, err := etcds.Start(nodesInfo) - if err != nil { - return nil, nil, err - } - // 
wait until etcd election finished - time.Sleep(20 * time.Second) - - // Start Mds cluster - mds := mds.New(c) - dcs, err = mds.Start(nodesInfo, dcs) - if err != nil { - return nil, nil, err +func newBsClusterManager(uuid, kind string) *clusterd.BsClusterManager { + return &clusterd.BsClusterManager{ + UUID: uuid, + Kind: kind, } - // wait until mds election finished - time.Sleep(20 * time.Second) - - return nodesInfo, dcs, nil } -// reconcileCurveDaemons start all daemon progress of Curve -func reconcileCurveDaemons(c *daemon.Cluster) error { - // shared server - nodesInfo, dcs, err := reconcileSharedServer(c) - if err != nil { - return err - } - // chunkserver - chunkservers := chunkserver.New(c) - dcs, err = chunkservers.Start(nodesInfo, dcs) +// initCluster initialize a new cluster +func initCluster(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + err := preClusterStartValidation(cluster) if err != nil { return err } - // snapshotclone - if c.SnapShotClone.Enable { - snapshotclone := snapshotclone.New(c) - dcs, err = snapshotclone.Start(nodesInfo, dcs) - if err != nil { - return err - } - } - - if c.Monitor.Enable { - monitor := monitor.New(c) - err = monitor.Start(nodesInfo, dcs) - if err != nil { - return err - } - } - - // report cluster - err = runReportCronJob(c, c.SnapShotClone.Enable) + err = reconcileCluster(cluster, dcs) if err != nil { return err } - // clean up the cluster install environment - err = cleanClusterInstallEnv(c) - if err != nil { - return err - } + return nil +} +// preClusterStartValidation cluster Spec validation +func preClusterStartValidation(cluster clusterd.Clusterer) error { return nil } -// reconcileCurveDaemons start all daemon progress of Curve -func reconcileCurveFSDaemons(c *daemon.Cluster) error { - // shared server - nodesInfo, dcs, err := reconcileSharedServer(c) - if err != nil { +func reconcileCluster(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + if err := 
constructConfigMap(cluster, dcs); err != nil { return err } - - // metaserver - metaservers := metaserver.New(c) - dcs, err = metaservers.Start(nodesInfo, dcs) - if err != nil { + if err := reconcileCurveDaemons(cluster, dcs); err != nil { return err } + return nil +} - if c.Monitor.Enable { - monitor := monitor.New(c) - err = monitor.Start(nodesInfo, dcs) - if err != nil { +// reconcileCurveDaemons start all daemon progress of Curve of specified type +func reconcileCurveDaemons(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + for _, dc := range dcs { + serviceConfigs := dc.GetProjectLayout().ServiceConfFiles + for _, conf := range serviceConfigs { + err := mutateConfig(cluster, dc, conf.Name) + if err != nil { + return err + } + } + // mutate tools.conf in configmp + if err := mutateConfig(cluster, dc, topology.LAYOUT_TOOLS_NAME); err != nil { return err } - } - // report cluster - err = runReportCronJob(c, c.SnapShotClone.Enable) - if err != nil { - return err - } + // start specified service + if err := service.StartService(cluster, dc); err != nil { + return err + } - // clean up the cluster install environment - err = cleanClusterInstallEnv(c) - if err != nil { - return err + if dc.GetKind() == topology.KIND_CURVEBS && dc.GetRole() == topology.ROLE_MDS { + // 创建物理池 + if err := service.StartJobCreatePool(cluster, dc, dcs, service.POOL_TYPE_PHYSICAL); err != nil { + return err + } + } else if dc.GetKind() == topology.KIND_CURVEBS && dc.GetRole() == topology.ROLE_CHUNKSERVER { + // 创建逻辑池 + if err := service.StartJobCreatePool(cluster, dc, dcs, service.POOL_TYPE_LOGICAL); err != nil { + return err + } + } else if dc.GetKind() == topology.KIND_CURVEFS && dc.GetRole() == topology.ROLE_MDS { + // 创建逻辑池 + if err := service.StartJobCreatePool(cluster, dc, dcs, service.POOL_TYPE_LOGICAL); err != nil { + return err + } + } } return nil } -// cleanClusterInstallEnv clean up the cluster install environment -func cleanClusterInstallEnv(c *daemon.Cluster) 
error { - return k8sutil.DeleteSyncConfigDeployment(context.TODO(), &c.Context, SyncConfigDeployment, c.Namespace) -} +// // reconcileCurveDaemons start all daemon progress of Curve +// func reconcileCurveFSDaemons(c *daemon.Cluster) error { +// // metaserver +// metaservers := metaserver.New(c) +// dcs, err = metaservers.Start(nodesInfo, dcs) +// if err != nil { +// return err +// } + +// if c.Monitor.Enable { +// monitor := monitor.New(c) +// err = monitor.Start(nodesInfo, dcs) +// if err != nil { +// return err +// } +// } + +// // report cluster +// err = runReportCronJob(c, c.SnapShotClone.Enable) +// if err != nil { +// return err +// } + +// // clean up the cluster install environment +// err = cleanClusterInstallEnv(c) +// if err != nil { +// return err +// } + +// return nil +// } diff --git a/pkg/controllers/curvecluster_controller.go b/pkg/controllers/curvecluster_controller.go deleted file mode 100644 index 1e46a443..00000000 --- a/pkg/controllers/curvecluster_controller.go +++ /dev/null @@ -1,235 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "path" - - "github.com/go-logr/logr" - "github.com/google/uuid" - "github.com/pkg/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" -) - -// ClusterController controls an instance of a Curve Cluster -type ClusterController struct { - context clusterd.Context - namespacedName types.NamespacedName - clusterMap map[string]*daemon.Cluster -} - -// CurveClusterReconciler reconciles a CurveCluster object -type CurveClusterReconciler struct { - Client client.Client - Log logr.Logger - Scheme *runtime.Scheme - - ClusterController *ClusterController -} - -func NewCurveClusterReconciler( - client client.Client, - log logr.Logger, - scheme *runtime.Scheme, - context clusterd.Context, -) *CurveClusterReconciler { - return &CurveClusterReconciler{ - Client: client, - Log: log, - Scheme: scheme, - ClusterController: &ClusterController{ - context: context, - clusterMap: make(map[string]*daemon.Cluster), - }, - } -} - -// +kubebuilder:rbac:groups=operator.curve.io,resources=curveclusters,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=operator.curve.io,resources=curveclusters/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;delete -// +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create;update;get;list;watch;delete -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;update;patch -// 
+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete - -func (r *CurveClusterReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - ctx := context.Background() - log := r.Log.WithValues("curvecluster", req.NamespacedName) - - log.Info("reconcileing CurveCluster") - - r.ClusterController.context.Client = r.Client - r.ClusterController.namespacedName = req.NamespacedName - - // Fetch the curveCluster instance - var curveCluster curvev1.CurveCluster - err := r.Client.Get(ctx, req.NamespacedName, &curveCluster) - if err != nil { - if kerrors.IsNotFound(err) { - // Arrive it represent the cluster has been delete - log.Error(err, "curveCluster resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, errors.Wrap(err, "failed to get curveCluster") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = AddFinalizerIfNotPresent(context.Background(), r.Client, &curveCluster) - if err != nil { - return reconcile.Result{}, err - } - - // Delete: the CR was deleted - if !curveCluster.GetDeletionTimestamp().IsZero() { - return r.reconcileDelete(&curveCluster) - } - - ownerInfo := k8sutil.NewOwnerInfo(&curveCluster, r.Scheme) - // reconcileCurveCluster func to run reconcile curve cluster - if err := r.ClusterController.reconcileCurveCluster(&curveCluster, ownerInfo); err != nil { - k8sutil.UpdateCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeFailure, curvev1.ConditionTrue, curvev1.ConditionReconcileFailed, "Reconcile curvecluster failed") - return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile cluster %q", curveCluster.Name) - } - - k8sutil.UpdateCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeClusterReady, curvev1.ConditionTrue, curvev1.ConditionReconcileSucceeded, "Reconcile curvecluster successed") - - return ctrl.Result{}, nil -} - -// reconcileDelete -func (r *CurveClusterReconciler) reconcileDelete(curveCluster *curvev1.CurveCluster) (reconcile.Result, error) { - log.Log.Info("delete the cluster CR now", "name", curveCluster.ObjectMeta.Name) - k8sutil.UpdateCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeDeleting, curvev1.ConditionTrue, curvev1.ConditionDeletingClusterReason, "Reconcile curvecluster deleting") - - if curveCluster.Spec.CleanupConfirm == "Confirm" || curveCluster.Spec.CleanupConfirm == "confirm" { - daemonHosts, _ := k8sutil.GetValidDaemonHosts(r.ClusterController.context, curveCluster) - chunkserverHosts, _ := k8sutil.GetValidChunkserverHosts(r.ClusterController.context, curveCluster) - 
nodesForJob := k8sutil.MergeNodesOfDaemonAndChunk(daemonHosts, chunkserverHosts) - - go r.ClusterController.startClusterCleanUp(r.ClusterController.context, curveCluster.Namespace, nodesForJob) - } - - // Delete it from clusterMap - if _, ok := r.ClusterController.clusterMap[curveCluster.Namespace]; ok { - delete(r.ClusterController.clusterMap, curveCluster.Namespace) - } - // Remove finalizers - err := removeFinalizer(r.Client, r.ClusterController.namespacedName, curveCluster, "") - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove curvecluster cr finalizers") - } - - logger.Infof("curve cluster %v deleted", curveCluster.Name) - - return reconcile.Result{}, nil -} - -// reconcileCurveCluster -func (c *ClusterController) reconcileCurveCluster(clusterObj *curvev1.CurveCluster, ownerInfo *k8sutil.OwnerInfo) error { - // one cr cluster in one namespace is allowed - cluster, ok := c.clusterMap[clusterObj.Namespace] - if !ok { - logger.Info("A new curve BS Cluster will be created!!!") - newUUID := uuid.New().String() - cluster = newCluster(newUUID, config.KIND_CURVEBS, false) - // TODO: update cluster spec if the cluster has already exist! 
- } else { - logger.Infof("Cluster has been exist but need configured but we don't apply it now, you need delete it and recreate it!!!<->namespace=%q", cluster.Namespace) - return nil - } - - // Set the context and NameSpacedName - cluster.Context = c.context - cluster.Namespace = c.namespacedName.Namespace - cluster.NamespacedName = c.namespacedName - cluster.ObservedGeneration = clusterObj.ObjectMeta.Generation - cluster.OwnerInfo = ownerInfo - // Set the spec - cluster.Nodes = clusterObj.Spec.Nodes - cluster.CurveVersion = clusterObj.Spec.CurveVersion - cluster.Etcd = clusterObj.Spec.Etcd - cluster.Mds = clusterObj.Spec.Mds - cluster.SnapShotClone = clusterObj.Spec.SnapShotClone - cluster.Chunkserver = clusterObj.Spec.Storage - cluster.Monitor = clusterObj.Spec.Monitor - - cluster.HostDataDir = clusterObj.Spec.HostDataDir - cluster.DataDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "data") - cluster.LogDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "logs") - cluster.ConfDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "conf") - c.clusterMap[cluster.Namespace] = cluster - - log.Log.Info("reconcileing CurveCluster in namespace", "namespace", cluster.Namespace) - - return c.initCluster(cluster) -} - -// initCluster initialize cluster info -func (c *ClusterController) initCluster(cluster *daemon.Cluster) error { - err := preClusterStartValidation(cluster) - if err != nil { - return errors.Wrap(err, "failed to preforem validation before cluster creation") - } - if cluster.Kind == config.KIND_CURVEBS { - err = reconcileCurveDaemons(cluster) - } else { - err = reconcileCurveFSDaemons(cluster) - } - if err != nil { - return err - } - - return nil -} - -// preClusterStartValidation Cluster Spec validation -func preClusterStartValidation(cluster *daemon.Cluster) error { - if cluster.Kind == config.KIND_CURVEFS { - return nil - } - nodesNum := len(cluster.Nodes) - storageNodesNum := len(cluster.Chunkserver.Devices) - if nodesNum == 1 && storageNodesNum < 3 
{ - return errors.Errorf(`The number of configured chunkserver devices must be greater than 3 - for CurveBS cluster stand-alone deployment %d`, nodesNum) - } - - return nil -} - -func (r *CurveClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&curvev1.CurveCluster{}). - Complete(r) -} diff --git a/pkg/controllers/curvefs_controller.go b/pkg/controllers/curvefs_controller.go deleted file mode 100644 index cd83aa83..00000000 --- a/pkg/controllers/curvefs_controller.go +++ /dev/null @@ -1,190 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "context" - "path" - - "github.com/go-logr/logr" - "github.com/google/uuid" - "github.com/pkg/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" -) - -// CurvefsReconciler reconciles a Curvefs object -type CurvefsReconciler struct { - client.Client - Log logr.Logger - Scheme *runtime.Scheme - - ClusterController *ClusterController -} - -func NewCurvefsReconciler( - client client.Client, - log logr.Logger, - scheme *runtime.Scheme, - context clusterd.Context, -) *CurvefsReconciler { - return &CurvefsReconciler{ - Client: client, - Log: log, - Scheme: scheme, - ClusterController: &ClusterController{ - context: context, - clusterMap: make(map[string]*daemon.Cluster), - }, - } -} - -// +kubebuilder:rbac:groups=operator.curve.io,resources=curvefs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=operator.curve.io,resources=curvefs/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;delete -// +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create;update;get;list;watch;delete -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;update;patch -// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// 
+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete - -func (r *CurvefsReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - ctx := context.Background() - logger := r.Log.WithValues("curve FS cluster", req.NamespacedName) - - logger.Info("reconcileing CurvefsCluster") - - r.ClusterController.context.Client = r.Client - r.ClusterController.namespacedName = req.NamespacedName - - var curvefsCluster curvev1.Curvefs - err := r.Client.Get(ctx, req.NamespacedName, &curvefsCluster) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Error(err, "curvefs resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - return reconcile.Result{}, errors.Wrap(err, "failed to get curvefs Cluster") - } - - // Set a finalizer so we can do cleanup before the object goes away - err = AddFinalizerIfNotPresent(context.Background(), r.Client, &curvefsCluster) - if err != nil { - return reconcile.Result{}, err - } - - // The CR was deleted - if !curvefsCluster.GetDeletionTimestamp().IsZero() { - return r.reconcileCurvefsDelete(&curvefsCluster) - } - - ownerInfo := k8sutil.NewOwnerInfo(&curvefsCluster, r.Scheme) - if err := r.ClusterController.reconcileCurvefsCluster(&curvefsCluster, ownerInfo); err != nil { - k8sutil.UpdateFSCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeFailure, curvev1.ConditionTrue, curvev1.ConditionReconcileFailed, "Reconcile curvecluster failed") - return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile cluster %q", curvefsCluster.Name) - } - - k8sutil.UpdateFSCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeClusterReady, curvev1.ConditionTrue, curvev1.ConditionReconcileSucceeded, "Reconcile curvecluster successed") - - return 
ctrl.Result{}, nil -} - -// reconcileCurvefsDelete -func (r *CurvefsReconciler) reconcileCurvefsDelete(curvefsCluster *curvev1.Curvefs) (reconcile.Result, error) { - log.Log.Info("delete the cluster CR now", "name", curvefsCluster.ObjectMeta.Name) - k8sutil.UpdateFSCondition(context.TODO(), &r.ClusterController.context, r.ClusterController.namespacedName, curvev1.ConditionTypeDeleting, curvev1.ConditionTrue, curvev1.ConditionDeletingClusterReason, "Reconcile curvecluster deleting") - - daemonHosts, _ := k8sutil.GetValidFSDaemonHosts(r.ClusterController.context, curvefsCluster) - if curvefsCluster.Spec.CleanupConfirm == "Confirm" || curvefsCluster.Spec.CleanupConfirm == "confirm" { - go r.ClusterController.startClusterCleanUp(r.ClusterController.context, curvefsCluster.Namespace, daemonHosts) - } - - // Delete it from clusterMap - if _, ok := r.ClusterController.clusterMap[curvefsCluster.Namespace]; ok { - delete(r.ClusterController.clusterMap, curvefsCluster.Namespace) - } - // Remove finalizers - err := removeFinalizer(r.Client, r.ClusterController.namespacedName, curvefsCluster, "") - if err != nil { - return reconcile.Result{}, errors.Wrap(err, "failed to remove curve fs cluster cr finalizers") - } - - logger.Infof("curve cluster %v deleted", curvefsCluster.Name) - - return reconcile.Result{}, nil -} - -// reconcileCurvefsCluster -func (c *ClusterController) reconcileCurvefsCluster(clusterObj *curvev1.Curvefs, ownerInfo *k8sutil.OwnerInfo) error { - // one cluster in one namespace is allowed - cluster, ok := c.clusterMap[clusterObj.Namespace] - if !ok { - logger.Info("A new curve FS Cluster will be created!!!") - newUUID := uuid.New().String() - cluster = newCluster(newUUID, config.KIND_CURVEFS, false) - // TODO: update cluster spec if the cluster has already exist! 
- } else { - logger.Infof("Cluster has been exist but need configured but we don't apply it now, you need delete it and recreate it!!!namespace=%q", cluster.Namespace) - return nil - } - - // Set the context and metadata info - cluster.Context = c.context - cluster.Namespace = c.namespacedName.Namespace - cluster.NamespacedName = c.namespacedName - cluster.ObservedGeneration = clusterObj.ObjectMeta.Generation - cluster.OwnerInfo = ownerInfo - // Set the spec - cluster.Nodes = clusterObj.Spec.Nodes - cluster.CurveVersion = clusterObj.Spec.CurveVersion - cluster.Etcd = clusterObj.Spec.Etcd - cluster.Mds = clusterObj.Spec.Mds - cluster.SnapShotClone = clusterObj.Spec.SnapShotClone - cluster.Metaserver = clusterObj.Spec.MetaServer - cluster.Monitor = clusterObj.Spec.Monitor - - // Set Host path - cluster.HostDataDir = clusterObj.Spec.HostDataDir - cluster.DataDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "data") - cluster.LogDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "logs") - cluster.ConfDirHostPath = path.Join(clusterObj.Spec.HostDataDir, "conf") - - c.clusterMap[cluster.Namespace] = cluster - - log.Log.Info("reconcileing Curve FS Cluster in namespace", "namespace", cluster.Namespace) - - // Start the main Curve cluster orchestration - return c.initCluster(cluster) -} - -func (r *CurvefsReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&curvev1.Curvefs{}). 
- Complete(r) -} diff --git a/pkg/controllers/dummy.go b/pkg/controllers/dummy.go new file mode 100644 index 00000000..eb1c23bf --- /dev/null +++ b/pkg/controllers/dummy.go @@ -0,0 +1,266 @@ +package controllers + +import ( + "bytes" + "fmt" + "path" + + apps "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/remotecommand" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" + "github.com/pkg/errors" +) + +const ( + CURVE_DUMMY_SERVICE = "curve-dummy-service" + CURVE_CONFIG_TEMPLATE = "curve-config-template" +) + +func getDummyServiceLabels() map[string]string { + labels := make(map[string]string) + labels["app"] = CURVE_DUMMY_SERVICE + return labels +} + +// createSyncDeployment create a deployment for read config file +func makeDummyDeployment(c clusterd.Clusterer, dcs []*topology.DeployConfig) error { + container := v1.Container{ + Name: CURVE_DUMMY_SERVICE, + Command: []string{ + "/bin/bash", + }, + Args: []string{ + "-c", + "while true; do echo sync pod to read various config file from it; sleep 10;done", + }, + Image: dcs[0].GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, + } + + podSpec := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: CURVE_DUMMY_SERVICE, + Labels: getDummyServiceLabels(), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + container, + }, + RestartPolicy: v1.RestartPolicyAlways, + HostNetwork: true, + NodeName: dcs[0].GetHost(), + DNSPolicy: v1.DNSClusterFirstWithHostNet, + }, + } + + replicas := int32(1) + d := &apps.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: CURVE_DUMMY_SERVICE, + Namespace: c.GetNameSpace(), + Labels: getDummyServiceLabels(), + }, + Spec: apps.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: 
getDummyServiceLabels(), + }, + Template: podSpec, + Replicas: &replicas, + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + }, + }, + } + + if err := c.GetOwnerInfo().SetControllerReference(d); err != nil { + return err + } + + if _, err := k8sutil.CreateOrUpdateDeploymentAndWaitStart(c.GetContext().Clientset, d); err != nil { + return err + } + + // update condition type and phase etc. + return nil +} + +// makeTemplateConfigMap make a configmap store all config file with template value +func makeTemplateConfigMap(c clusterd.Clusterer, dcs []*topology.DeployConfig) error { + configMapData, err := getDefaultConfigMapData(c, dcs) + if err != nil { + return err + } + + cm := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CURVE_CONFIG_TEMPLATE, + Namespace: c.GetNameSpace(), + }, + Data: configMapData, + } + + err = c.GetOwnerInfo().SetControllerReference(cm) + if err != nil { + return err + } + + _, err = k8sutil.CreateOrUpdateConfigMap(c.GetContext().Clientset, cm) + if err != nil { + return err + } + + logger.Infof("create configmap %s successed", CURVE_CONFIG_TEMPLATE) + return nil +} + +// getDefaultConfigMapData read all config files with template value +func getDefaultConfigMapData(c clusterd.Clusterer, dcs []*topology.DeployConfig) (map[string]string, error) { + labels := getDummyServiceLabels() + selector := k8sutil.GetLabelSelector(labels) + pods, err := k8sutil.GetPodsByLabelSelector(c.GetContext().Clientset, c.GetNameSpace(), selector) + if err != nil { + return nil, err + } + + if len(pods.Items) != 1 { + return nil, errors.New("app=sync-config label matches no pods") + } + pod := pods.Items[0] + + role2Configs := map[string][]string{} + // distinct the same config + for _, dc := range dcs { + role2Configs[dc.GetRole()] = topology.ServiceConfigs[dc.GetRole()] + } + // for tool.conf + role2Configs["tools.conf"] = []string{ + topology.LAYOUT_TOOLS_NAME, + } + + confSrcDir := 
dcs[0].GetProjectLayout().ServiceConfSrcDir // /curvefs/conf + + configMapData := make(map[string]string) + for _, confNames := range role2Configs { + for _, confName := range confNames { + confSrcPath := path.Join(confSrcDir, confName) // /curvefs/conf/mds.conf + configMapData[confName], err = readConfigFromDummyPod(c, &pod, confSrcPath) + if err != nil { + return nil, err + } + } + } + + return configMapData, nil +} + +// readConfigFromDummyPod read content (config file) from dummy pod +func readConfigFromDummyPod(c clusterd.Clusterer, pod *v1.Pod, configPath string) (string, error) { + logger.Infof("syncing %v", configPath) + var ( + execOut bytes.Buffer + execErr bytes.Buffer + ) + + req := c.GetContext().Clientset.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). + SubResource("exec") + req.VersionedParams(&v1.PodExecOptions{ + Container: pod.Spec.Containers[0].Name, + Command: []string{"cat", configPath}, + Stdout: true, + Stderr: true, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(c.GetContext().KubeConfig, "POST", req.URL()) + if err != nil { + return "", fmt.Errorf("failed to init executor: %v", err) + } + + err = exec.Stream(remotecommand.StreamOptions{ + Stdout: &execOut, + Stderr: &execErr, + Tty: false, + }) + + if err != nil { + return "", fmt.Errorf("could not execute: %v", err) + } + + if execErr.Len() > 0 { + return "", fmt.Errorf("stderr: %v", execErr.String()) + } + + cmdOutput := execOut.String() + return cmdOutput, nil +} + +// // createGrafanaConfigMapTemplate copy grafana dashborads source to grafana container +// func createGrafanaConfigMapTemplate(c *daemon.Cluster) error { +// labels := getReadConfigJobLabel(c) +// selector := k8sutil.GetLabelSelector(labels) +// pods, err := k8sutil.GetPodsByLabelSelector(c.Context.Clientset, c.Namespace, selector) +// if err != nil { +// return err +// } + +// if len(pods.Items) != 1 { +// return errors.New("app=sync-config label 
matches no pods") +// } +// pod := pods.Items[0] + +// configMapData := make(map[string]string) + +// var pathPrefix string +// var dashboards []string +// if c.Kind == config.KIND_CURVEBS { +// pathPrefix = "/curvebs/monitor/grafana" +// dashboards = GrafanaDashboardsConfigs +// } else { +// pathPrefix = "/curvefs/monitor/grafana" +// dashboards = FSGrafanaDashboardsConfigs +// } + +// for _, name := range dashboards { +// configPath := pathPrefix +// if name != "grafana.ini" { +// configPath = path.Join(pathPrefix, "/provisioning/dashboards") +// } +// configPath = path.Join(configPath, name) +// content, err := readConfigFromContainer(c, pod, configPath) +// if err != nil { +// return err +// } + +// configMapData[name] = content +// } + +// cm := &v1.ConfigMap{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: config.GrafanaDashboardsTemp, +// Namespace: c.Namespace, +// }, +// Data: configMapData, +// } + +// err = c.OwnerInfo.SetControllerReference(cm) +// if err != nil { +// return errors.Wrapf(err, "failed to set owner reference to configmap %q", config.GrafanaDashboardsTemp) +// } + +// // create configmap in cluster +// _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(cm) +// if err != nil && !kerrors.IsAlreadyExists(err) { +// return errors.Wrapf(err, "failed to create configmap %s", config.GrafanaDashboardsTemp) +// } + +// return nil +// } diff --git a/pkg/controllers/fs_controller.go b/pkg/controllers/fs_controller.go new file mode 100644 index 00000000..c7f7438c --- /dev/null +++ b/pkg/controllers/fs_controller.go @@ -0,0 +1,363 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + curvev1 "github.com/opencurve/curve-operator/api/v1" + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/service" + "github.com/opencurve/curve-operator/pkg/topology" +) + +// CurvefsReconciler reconciles a Curvefs object +type CurvefsReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + + context clusterd.Context + clusterMap map[string]*clusterd.FsClusterManager +} + +func NewCurvefsReconciler( + client client.Client, + log logr.Logger, + scheme *runtime.Scheme, + + context clusterd.Context, +) *CurvefsReconciler { + return &CurvefsReconciler{ + Client: client, + Log: log, + Scheme: scheme, + + context: context, + clusterMap: make(map[string]*clusterd.FsClusterManager), + } +} + +// +kubebuilder:rbac:groups=operator.curve.io,resources=curvefs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=operator.curve.io,resources=curvefs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;delete +// +kubebuilder:rbac:groups=core,resources=pods/exec,verbs=create;update;get;list;watch;delete +// 
+kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;update;patch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete + +func (r *CurvefsReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + logger := r.Log.WithValues("curve FS cluster", req.NamespacedName) + logger.Info("reconcileing CurvefsCluster") + + r.context.Client = r.Client + ctx := context.Background() + + curvefsCluster := &curvev1.Curvefs{} + if err := r.Client.Get(ctx, req.NamespacedName, curvefsCluster); err != nil { + logger.Error(err, "curvefs resource not found. 
Ignoring since object must be deleted.") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // Set a finalizer so we can do cleanup before the object goes away + if err := k8sutil.AddFinalizerIfNotPresent(ctx, r.Client, curvefsCluster); err != nil { + return reconcile.Result{}, err + } + + // k8sutil.UpdateCondition(context.TODO(), + // r.Client, + // clusterd.KIND_CURVEFS, + // req.NamespacedName, + // curvev1.ClusterPending, + // curvev1.ClusterCondition{ + // Type: curvev1.ConditionProgressing, + // Status: curvev1.ConditionStatusTrue, + // Reason: curvev1.ConditionReconcileStarted, + // Message: "start to reconcile curve fs cluster", + // }, + // ) + + // The CR was deleted + if !curvefsCluster.GetDeletionTimestamp().IsZero() { + return reconcile.Result{}, r.reconcileCurvefsDelete(curvefsCluster) + } + + ownerInfo := clusterd.NewOwnerInfo(curvefsCluster, r.Scheme) + return r.reconcileCurvefsCluster(curvefsCluster, ownerInfo) +} + +// reconcileCurvefsDelete +func (r *CurvefsReconciler) reconcileCurvefsDelete(clusterObj *curvev1.Curvefs) error { + // get currnet cluster and delete it + cluster, ok := r.clusterMap[clusterObj.GetNamespace()] + if !ok { + logger.Errorf("failed to find the cluster %q", clusterObj.GetName()) + return errors.New("internal error") + } + + dcs, err := topology.ParseTopology(cluster) + if err != nil { + return err + } + + err = service.StartClusterCleanUpJob(cluster, dcs) + if err != nil { + return err + } + + // delete it from clusterMap + if _, ok := r.clusterMap[cluster.GetNameSpace()]; ok { + delete(r.clusterMap, cluster.GetNameSpace()) + } + + // remove finalizers + k8sutil.RemoveFinalizer(context.Background(), + r.Client, + types.NamespacedName{Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()}, + clusterObj) + + logger.Infof("curve cluster %v has been deleted successed", clusterObj.GetName()) + + return nil +} + +// reconcileCurvefsCluster start reconcile a CurveFS cluster +func (r *CurvefsReconciler) 
reconcileCurvefsCluster(clusterObj *curvev1.Curvefs, ownerInfo *clusterd.OwnerInfo) (reconcile.Result, error) { + m, ok := r.clusterMap[clusterObj.Namespace] + if !ok { + newUUID := uuid.New().String() + m = newFsClusterManager(newUUID, clusterd.KIND_CURVEFS) + } + + // construct cluster object + m.Context = r.context + m.Cluster = clusterObj + m.Logger = r.Log + m.OwnerInfo = ownerInfo + + r.clusterMap[m.GetNameSpace()] = m + m.Logger.Info("reconcileing Curve FS Cluster in namespace %q", m.GetNameSpace()) + + dcs, err := topology.ParseTopology(m) + if err != nil { + return reconcile.Result{}, err + } + + switch m.Cluster.Status.Phase { + case "": + // Update the cluster status to 'Creating' + m.Logger.Info("Curvefs accepted by operator", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + // create a configmap to record previous config of yaml file + if err := createorUpdateRecordConfigMap(m); err != nil { + m.Logger.Error(err, "failed to create or update previous ConfigMap") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + m.Cluster.Status.Phase = curvev1.ClusterCreating + m.Cluster.Status.CurveVersion = m.Cluster.Spec.CurveVersion + m.Cluster.Status.StorageDir.DataDir = m.Cluster.Spec.DataDir + m.Cluster.Status.StorageDir.LogDir = m.Cluster.Spec.LogDir + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "unable to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterCreating: + // Create a new cluster and update cluster status to 'Running' + initCluster(m, dcs) + m.Logger.Info("Curvefs accepted by operator", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "unable to update Curvefs") + return ctrl.Result{}, 
client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterRunning: + // Watch the update event and update cluster stauts to specfied 'status' + // Upgrading、Updating、Scaling + + // 1. check for upgrade + if m.Cluster.Spec.CurveVersion.Image != m.Cluster.Status.CurveVersion.Image { + m.Logger.Info("Check curvefs cluster image not match, need upgrade") + m.Cluster.Status.Phase = curvev1.ClusterUpgrading + m.Cluster.Status.CurveVersion = m.Cluster.Spec.CurveVersion + } + + // TODO: 2. compare DataDir and LogDir - not implement + // if m.Cluster.Spec.DataDir != m.Cluster.Status.StorageDir.DataDir || + // m.Cluster.Spec.LogDir != m.Cluster.Status.StorageDir.LogDir { + // m.Cluster.Status.Phase = curvev1.ClusterUpdating + // m.Cluster.Status.StorageDir.DataDir = m.Cluster.Spec.DataDir + // m.Cluster.Status.StorageDir.LogDir = m.Cluster.Spec.LogDir + // } + + // 3. compare etcd and mds and metaserver config + specParameters, _ := parseSpecParameters(m) + statusParameters, err := getDataFromRecordConfigMap(m) + if err != nil { + m.Logger.Error(err, "failed to read record config from record-configmap") + return ctrl.Result{}, nil + } + statusModified := false + for role, specRolePara := range specParameters { + roleParaVar := map[string]string{} + for specPK, specPV := range specRolePara { + paraStatusVal, paraExists := statusParameters[role][specPK] + if !paraExists || paraStatusVal != specPV { + roleParaVar[specPK] = specPV + statusModified = true + } + delete(statusParameters[role], specPK) + } + // delete some parameters + if len(statusParameters[role]) > 0 { + statusModified = true + } + m.Cluster.Status.LastModContextSet.ModContextSet = append(m.Cluster.Status.LastModContextSet.ModContextSet, curvev1.ModContext{ + Role: role, + Parameters: roleParaVar, + }) + } + if statusModified { + m.Cluster.Status.Phase = curvev1.ClusterUpdating + } + + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "unable to 
update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterUpdating: + // Update cluster and the target status is Running to watch other update events. + m.Logger.Info("Curvefs running to update", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + mcs := m.Cluster.Status.LastModContextSet.ModContextSet + if len(mcs) <= 0 { + m.Logger.Info("No Config need to update, ignore the event") + return ctrl.Result{}, nil + } + + roles2Modfing := map[string]bool{} + for _, ctx := range mcs { + roles2Modfing[ctx.Role] = true + } + // render fs-record-config ConfigMap again + if err := createorUpdateRecordConfigMap(m); err != nil { + m.Logger.Error(err, "failed to create or update previous ConfigMap") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + // 1. render After-Mutate-Config ConfigMap again + for role := range roles2Modfing { + for _, dc := range topology.FilterDeployConfigByRole(dcs, role) { + serviceConfigs := dc.GetProjectLayout().ServiceConfFiles + for _, conf := range serviceConfigs { + err := mutateConfig(m, dc, conf.Name) + if err != nil { + m.Logger.Error(err, "failed to render configmap again") + return ctrl.Result{}, err + } + } + } + + } + // 2. rebuild the Pods under the Deployment corresponding to the role, upgrade one by one. + // And wait for all Pods under the Deployment (only one) to be in the Ready state. 
+ for role := range roles2Modfing { + for _, dc := range topology.FilterDeployConfigByRole(dcs, role) { + if err := service.StartService(m, dc); err != nil { + m.Logger.Error(err, "failed to update Deployment Service") + return ctrl.Result{}, err + } + } + } + + m.Cluster.Status.Phase = curvev1.ClusterRunning + m.Cluster.Status.LastModContextSet.ModContextSet = nil + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterUpgrading: + // Upgrade cluster and the target status is Running to watch other update events. + m.Logger.Info("Curvefs running to update", "curvefs", client.ObjectKey{ + Name: m.GetName(), + Namespace: m.GetNameSpace(), + }) + + for _, dc := range dcs { + if err := service.StartService(m, dc); err != nil { + m.Logger.Error(err, "failed to upgrade service ", dc.GetName()) + return ctrl.Result{}, err + } + } + + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + case curvev1.ClusterScaling: + // Perform the scale operation. + // The target status is Running, and continue to listen to other events. + m.Cluster.Status.Phase = curvev1.ClusterRunning + if err := r.Status().Update(context.TODO(), m.Cluster); err != nil { + m.Logger.Error(err, "failed to update Curvefs") + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +func (r *CurvefsReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&curvev1.Curvefs{}). 
+ Complete(r) +} diff --git a/pkg/controllers/report.go b/pkg/controllers/report.go deleted file mode 100644 index 9d7c1786..00000000 --- a/pkg/controllers/report.go +++ /dev/null @@ -1,159 +0,0 @@ -package controllers - -import ( - "fmt" - "path" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/pkg/errors" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func createReportConfigMap(c *daemon.Cluster) error { - configMapData := map[string]string{ - config.ReportConfigMapDataKey: REPORT, - } - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.ReportConfigMapName, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return err - } - - // Create topology-json-conf configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create %s configmap in namespace %s", cm.Name, c.Namespace) - } - - logger.Info("create report configmap successfully") - return nil -} - -func runReportCronJob(c *daemon.Cluster, snapshotEnable bool) error { - reportPath := path.Join("/", c.GetKind(), config.ReportConfigMapMountPathCommon) - reportFilePath := path.Join(reportPath, config.ReportConfigMapDataKey) - vols := daemon.DaemonVolumes(config.ToolsConfigMapDataKey, config.ToolsConfigMapMountPathDir, nil, config.ToolsConfigMapName) - vols = append(vols, daemon.DaemonVolumes(config.ReportConfigMapDataKey, reportPath, nil, config.ReportConfigMapName)...) 
- - volMounts := daemon.DaemonVolumeMounts(config.ToolsConfigMapDataKey, config.ToolsConfigMapMountPathDir, nil, config.ToolsConfigMapName) - volMounts = append(volMounts, daemon.DaemonVolumeMounts(config.ReportConfigMapDataKey, reportPath, nil, config.ReportConfigMapName)...) - - // construct command line - commandLine := fmt.Sprintf("%s %s %s %s %s %s", "bash", reportFilePath, c.GetKind(), c.GetUUID(), config.ROLE_ETCD, "&&") + - fmt.Sprintf("%s %s %s %s %s %s", "bash", reportFilePath, c.GetKind(), c.GetUUID(), config.ROLE_MDS, "&&") - if c.GetKind() == config.KIND_CURVEBS { - commandLine += fmt.Sprintf("%s %s %s %s %s", "bash", reportFilePath, c.GetKind(), c.GetUUID(), config.ROLE_CHUNKSERVER) - } else { - commandLine += fmt.Sprintf("%s %s %s %s %s", "bash", reportFilePath, c.GetKind(), c.GetUUID(), config.ROLE_METASERVER) - } - - if c.GetKind() == config.KIND_CURVEBS && snapshotEnable { - commandLine += "&& " + fmt.Sprintf("%s %s %s %s %s", "bash", reportFilePath, c.GetKind(), c.GetUUID(), config.ROLE_SNAPSHOTCLONE) - } - - container := v1.Container{ - Name: "crontab", - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - Command: []string{ - "/bin/bash", - "-c", - commandLine, - }, - VolumeMounts: volMounts, - } - - podSpec := v1.PodSpec{ - Containers: []v1.Container{container}, - Volumes: vols, - RestartPolicy: "OnFailure", - } - - jobSpec := batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - Spec: podSpec, - }, - } - - reserverJobs := int32(1) - - cronjob := &batchv1beta1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: c.Namespace, - Name: "report-crontab", - }, - Spec: batchv1beta1.CronJobSpec{ - Schedule: "0 * * * *", - JobTemplate: batchv1beta1.JobTemplateSpec{ - Spec: jobSpec, - }, - SuccessfulJobsHistoryLimit: &reserverJobs, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(cronjob) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to cronJob to %q", 
cronjob.Name) - } - - // create CronJob in cluster - _, err = c.Context.Clientset.BatchV1beta1().CronJobs(c.Namespace).Create(cronjob) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create CronJob to report") - } - - return nil -} - -var REPORT = ` -function rematch() { - local s=$1 regex=$2 - if [[ $s =~ $regex ]]; then - echo "${BASH_REMATCH[1]}" - fi -} - -function fs_usage() { - curvefs_tool usage-metadata 2>/dev/null | awk 'BEGIN { - BYTES["KB"] = 1024 - BYTES["MB"] = BYTES["KB"] * 1024 - BYTES["GB"] = BYTES["MB"] * 1024 - BYTES["TB"] = BYTES["GB"] * 1024 - } - { - if ($0 ~ /all cluster/) { - printf ("%0.f", $8 * BYTES[$9]) - } - }' -} - -function bs_usage() { - local message=$(curve_ops_tool space | grep physical) - local used=$(rematch "$message" "used = ([0-9]+)GB") - echo $(($used*1024*1024*1024)) -} - -[[ -z $(which curl) ]] && apt-get install -y curl -g_kind=$1 -g_uuid=$2 -g_role=$3 -g_usage=$(([[ $g_kind = "curvebs" ]] && bs_usage) || fs_usage) -curl -XPOST http://curveadm.aspirer.wang:19302/ \ - -d "kind=$g_kind" \ - -d "uuid=$g_uuid" \ - -d "role=$g_role" \ - -d "usage=$g_usage" -` diff --git a/pkg/controllers/sync.go b/pkg/controllers/sync.go deleted file mode 100644 index 972e1fb4..00000000 --- a/pkg/controllers/sync.go +++ /dev/null @@ -1,189 +0,0 @@ -package controllers - -import ( - "path" - - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" -) - -var BSConfigs = []string{ - "etcd.conf", - "mds.conf", - "chunkserver.conf", - "snapshotclone.conf", - "snap_client.conf", - "cs_client.conf", - "s3.conf", - "nginx.conf", - "tools.conf", -} - -var FSConfigs = []string{ - "etcd.conf", - "mds.conf", - "metaserver.conf", - "tools.conf", -} - -var 
GrafanaDashboardsConfigs = []string{ - "all.yml", - "chunkserver.json", - "client.json", - "etcd.json", - "mds.json", - "report.json", - "snapshotcloneserver.json", - "grafana.ini", -} - -var FSGrafanaDashboardsConfigs = []string{ - "all.yml", - "client.json", - "etcd.json", - "mds.json", - "metaserver.json", - "grafana.ini", -} - -// getDefaultConfigMapData -func getDefaultConfigMapData(c *daemon.Cluster) (map[string]string, error) { - labels := getReadConfigJobLabel(c) - selector := k8sutil.GetLabelSelector(labels) - pods, err := k8sutil.GetPodsByLabelSelector(c.Context.Clientset, c.Namespace, selector) - if err != nil { - return nil, err - } - - if len(pods.Items) != 1 { - return nil, errors.New("app=sync-config label matches no pods") - } - pod := pods.Items[0] - // for debug - logger.Infof("sync-config pod is %q", pod.Name) - - var configs []string - var configPath string - if c.Kind == config.KIND_CURVEBS { - configs = BSConfigs - configPath = "/curvebs/conf/" - } else { - configs = FSConfigs - configPath = "/curvefs/conf/" - } - logger.Infof("current cluster kind is %q", c.Kind) - logger.Infof("start syncing config from container %v", configs) - - configMapData := make(map[string]string) - for _, name := range configs { - configName := path.Join(configPath, name) - content, err := readConfigFromContainer(c, pod, configName) - if err != nil { - return nil, err - } - configMapData[name] = content - } - - return configMapData, nil -} - -// createDefaultConfigMap -func createDefaultConfigMap(c *daemon.Cluster) error { - configMapData, err := getDefaultConfigMapData(c) - if err != nil { - return err - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.DefaultConfigMapName, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err = c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to configmap %q", config.DefaultConfigMapName) - } - - // for debug - // 
log.Infof("namespace=%v", c.namespacedName.Namespace) - - // create configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create configmap %s", config.DefaultConfigMapName) - } - - logger.Infof("create configmap %q successed", config.DefaultConfigMapName) - return nil -} - -// createGrafanaConfigMapTemplate copy grafana dashborads source to grafana container -func createGrafanaConfigMapTemplate(c *daemon.Cluster) error { - labels := getReadConfigJobLabel(c) - selector := k8sutil.GetLabelSelector(labels) - pods, err := k8sutil.GetPodsByLabelSelector(c.Context.Clientset, c.Namespace, selector) - if err != nil { - return err - } - - if len(pods.Items) != 1 { - return errors.New("app=sync-config label matches no pods") - } - pod := pods.Items[0] - - configMapData := make(map[string]string) - - var pathPrefix string - var dashboards []string - if c.Kind == config.KIND_CURVEBS { - pathPrefix = "/curvebs/monitor/grafana" - dashboards = GrafanaDashboardsConfigs - } else { - pathPrefix = "/curvefs/monitor/grafana" - dashboards = FSGrafanaDashboardsConfigs - } - - for _, name := range dashboards { - configPath := pathPrefix - if name != "grafana.ini" { - configPath = path.Join(pathPrefix, "/provisioning/dashboards") - } - configPath = path.Join(configPath, name) - content, err := readConfigFromContainer(c, pod, configPath) - if err != nil { - return err - } - - configMapData[name] = content - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.GrafanaDashboardsTemp, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err = c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to configmap %q", config.GrafanaDashboardsTemp) - } - - // create configmap in cluster - _, err = 
c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create configmap %s", config.GrafanaDashboardsTemp) - } - - return nil -} diff --git a/pkg/controllers/util.go b/pkg/controllers/util.go new file mode 100644 index 00000000..7b2bdfda --- /dev/null +++ b/pkg/controllers/util.go @@ -0,0 +1,275 @@ +package controllers + +import ( + "bufio" + "fmt" + "regexp" + "strconv" + "strings" + + curvev1 "github.com/opencurve/curve-operator/api/v1" + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" + "github.com/opencurve/curve-operator/pkg/utils" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + BS_RECORD_CONFIGMAP = "bs-record-config" + FS_RECORD_CONFIGMAP = "fs-record-config" +) + +var roles = []string{ + topology.ROLE_ETCD, + topology.ROLE_MDS, + topology.ROLE_CHUNKSERVER, + topology.ROLE_SNAPSHOTCLONE, + topology.ROLE_METASERVER, +} + +func fmtParameter(k, v interface{}) string { + return fmt.Sprintf("%s=%s", k, v) +} + +func parseSpecParameters(cluster clusterd.Clusterer) (map[string]map[string]string, map[string]string) { + parameters := map[string]map[string]string{} + data := map[string]string{} + + for _, role := range roles { + specRolePara := map[string]string{} + roleParaLine := []string{} + switch role { + case topology.ROLE_ETCD: + roleParaLine = append(roleParaLine, fmtParameter(curvev1.CLIENT_PORT, *cluster.GetEtcdSpec().ClientPort)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.PEER_PORT, *cluster.GetEtcdSpec().PeerPort)) + specRolePara[curvev1.CLIENT_PORT] = strconv.Itoa(*cluster.GetEtcdSpec().ClientPort) + specRolePara[curvev1.PEER_PORT] = strconv.Itoa(*cluster.GetEtcdSpec().PeerPort) + case topology.ROLE_MDS: + roleParaLine = append(roleParaLine, 
fmtParameter(curvev1.PORT, *cluster.GetMdsSpec().Port)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.DUMMY_PORT, *cluster.GetMdsSpec().DummyPort)) + specRolePara[curvev1.PORT] = strconv.Itoa(*cluster.GetMdsSpec().Port) + specRolePara[curvev1.DUMMY_PORT] = strconv.Itoa(*cluster.GetMdsSpec().DummyPort) + + } + if role == topology.ROLE_CHUNKSERVER && cluster.GetKind() == topology.KIND_CURVEBS { + roleParaLine = append(roleParaLine, fmtParameter(curvev1.PORT, *cluster.GetChunkserverSpec().Port)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.INSTANCES, cluster.GetChunkserverSpec().Instances)) + specRolePara[curvev1.PORT] = strconv.Itoa(*cluster.GetChunkserverSpec().Port) + specRolePara[curvev1.INSTANCES] = strconv.Itoa(cluster.GetChunkserverSpec().Instances) + } + + if role == topology.ROLE_SNAPSHOTCLONE && cluster.GetKind() == topology.KIND_CURVEBS { + roleParaLine = append(roleParaLine, fmtParameter(curvev1.PORT, *cluster.GetSnapShotSpec().Port)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.DUMMY_PORT, *cluster.GetSnapShotSpec().DummyPort)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.PROXY_PORT, *cluster.GetSnapShotSpec().ProxyPort)) + specRolePara[curvev1.PORT] = strconv.Itoa(*cluster.GetSnapShotSpec().Port) + specRolePara[curvev1.DUMMY_PORT] = strconv.Itoa(*cluster.GetSnapShotSpec().DummyPort) + specRolePara[curvev1.PROXY_PORT] = strconv.Itoa(*cluster.GetSnapShotSpec().ProxyPort) + } + + if role == topology.ROLE_METASERVER && cluster.GetKind() == topology.KIND_CURVEFS { + roleParaLine = append(roleParaLine, fmtParameter(curvev1.PORT, *cluster.GetMetaserverSpec().Port)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.EXTERNAL_PORT, *cluster.GetMetaserverSpec().ExternalPort)) + roleParaLine = append(roleParaLine, fmtParameter(curvev1.INSTANCES, cluster.GetMetaserverSpec().Instances)) + specRolePara[curvev1.PORT] = strconv.Itoa(*cluster.GetMetaserverSpec().Port) + specRolePara[curvev1.EXTERNAL_PORT] = 
strconv.Itoa(*cluster.GetMetaserverSpec().ExternalPort) + specRolePara[curvev1.INSTANCES] = strconv.Itoa(cluster.GetMetaserverSpec().Instances) + } + + for key, val := range cluster.GetRoleConfigs(role) { + roleParaLine = append(roleParaLine, fmtParameter(key, val)) + specRolePara[key] = val + } + content := strings.Join(roleParaLine, "\n") + data[role] = content + parameters[role] = specRolePara + } + + return parameters, data +} + +func createorUpdateRecordConfigMap(cluster clusterd.Clusterer) error { + configmapName := topology.Choose( + cluster.GetKind() == topology.KIND_CURVEBS, BS_RECORD_CONFIGMAP, FS_RECORD_CONFIGMAP) + _, mapStringData := parseSpecParameters(cluster) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configmapName, + Namespace: cluster.GetNameSpace(), + }, + Data: mapStringData, + } + + err := cluster.GetOwnerInfo().SetControllerReference(cm) + if err != nil { + return err + } + + _, err = k8sutil.CreateOrUpdateConfigMap(cluster.GetContext().Clientset, cm) + if err != nil { + return err + } + + return nil +} + +// getDataFromRecordConfigMap reads data from the ConfigMap of the record and returns the data for formatting +func getDataFromRecordConfigMap(cluster clusterd.Clusterer) (map[string]map[string]string, error) { + configmapName := topology.Choose( + cluster.GetKind() == topology.KIND_CURVEBS, BS_RECORD_CONFIGMAP, FS_RECORD_CONFIGMAP) + + cm, err := k8sutil.GetConfigMapByName(cluster.GetContext().Clientset, cluster.GetNameSpace(), configmapName) + if err != nil { + return nil, err + } + allData := map[string]map[string]string{} + for key, value := range cm.Data { + oneroleConfig := map[string]string{} + lines := strings.Split(value, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if len(line) == 0 || !strings.Contains(line, "=") { + continue + } + parts := strings.Split(line, "=") + + if len(parts) >= 2 { + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + + 
oneroleConfig[key] = value + } + } + + allData[key] = oneroleConfig + } + + return allData, nil +} + +// constructTemplateConfigMap start a dummy Deployment service and read template config file to a ConfigMap +func constructConfigMap(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + if err := makeDummyDeployment(cluster, dcs); err != nil { + return err + } + + if err := makeTemplateConfigMap(cluster, dcs); err != nil { + return err + } + + if _, err := makeMutateConfigMap(cluster); err != nil { + return err + } + + return nil +} + +func makeMutateConfigMap(cluster clusterd.Clusterer) (*corev1.ConfigMap, error) { + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: utils.AFTER_MUTATE_CONF, + Namespace: cluster.GetNameSpace(), + }, + Data: map[string]string{}, + } + + err := cluster.GetOwnerInfo().SetControllerReference(cm) + if err != nil { + return nil, err + } + + _, err = k8sutil.CreateOrUpdateConfigMap(cluster.GetContext().Clientset, cm) + if err != nil { + return nil, err + } + + return cm, nil +} + +func mutateConfig(cluster clusterd.Clusterer, dc *topology.DeployConfig, name string) error { + templateCM, err := cluster.GetContext().Clientset.CoreV1().ConfigMaps(cluster.GetNameSpace()).Get(CURVE_CONFIG_TEMPLATE, metav1.GetOptions{}) + if err != nil { + return err + } + afterMutateCM, err := cluster.GetContext().Clientset.CoreV1().ConfigMaps(cluster.GetNameSpace()).Get(utils.AFTER_MUTATE_CONF, metav1.GetOptions{}) + if err != nil { + return err + } + + input := templateCM.Data[name] + + var key, value string + output := []string{} + scanner := bufio.NewScanner(strings.NewReader(input)) + for scanner.Scan() { + in := scanner.Text() + err := kvFilter(dc, in, &key, &value) + if err != nil { + return err + } + out, err := mutate(dc, in, key, value, name) + if err != nil { + return err + } + + output = append(output, out) + } + content := strings.Join(output, "\n") + afterKey := fmt.Sprintf("%s_%s", dc.GetName(), name) + 
afterMutateCM.Data[afterKey] = content + + _, err = k8sutil.UpdateConfigMap(cluster.GetContext().Clientset, afterMutateCM) + if err != nil { + return err + } + + return nil +} + +func kvFilter(dc *topology.DeployConfig, line string, key, value *string) error { + pattern := fmt.Sprintf(REGEX_KV_SPLIT, strings.TrimSpace(dc.GetConfigKvFilter()), dc.GetConfigKvFilter()) + regex, err := regexp.Compile(pattern) + if err != nil { + return errors.New("failed to build regex") + } + + mu := regex.FindStringSubmatch(line) + if len(mu) == 0 { + *key = "" + *value = "" + } else { + *key = mu[2] + *value = mu[3] + } + + return nil +} + +func mutate(dc *topology.DeployConfig, in, key, value string, name string) (out string, err error) { + if len(key) == 0 { + out = in + if name == "nginx.conf" { // only for nginx.conf + out, err = dc.GetVariables().Rendering(in) + } + return + } + + // replace config + v, ok := dc.GetServiceConfig()[strings.ToLower(key)] + if ok { + value = v + } + + // replace variable + value, err = dc.GetVariables().Rendering(value) + if err != nil { + return + } + + return +} diff --git a/pkg/daemon/cluster.go b/pkg/daemon/cluster.go deleted file mode 100644 index 8635247d..00000000 --- a/pkg/daemon/cluster.go +++ /dev/null @@ -1,36 +0,0 @@ -package daemon - -import ( - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "k8s.io/apimachinery/pkg/types" -) - -type Cluster struct { - UUID string - Kind string - Context clusterd.Context - Namespace string - NamespacedName types.NamespacedName - ObservedGeneration int64 - OwnerInfo *k8sutil.OwnerInfo - IsUpgrade bool - - Nodes []string - CurveVersion curvev1.CurveVersionSpec - Etcd curvev1.EtcdSpec - Mds curvev1.MdsSpec - SnapShotClone curvev1.SnapShotCloneSpec - Chunkserver curvev1.StorageScopeSpec - Metaserver curvev1.MetaServerSpec - Monitor curvev1.MonitorSpec - - HostDataDir string - DataDirHostPath 
string - LogDirHostPath string - ConfDirHostPath string -} - -func (c *Cluster) GetUUID() string { return c.UUID } -func (c *Cluster) GetKind() string { return c.Kind } diff --git a/pkg/daemon/gen_conf.go b/pkg/daemon/gen_conf.go deleted file mode 100644 index 1e9755eb..00000000 --- a/pkg/daemon/gen_conf.go +++ /dev/null @@ -1,131 +0,0 @@ -package daemon - -import ( - "emperror.dev/errors" - "github.com/coreos/pkg/capnslog" - "github.com/opencurve/curve-operator/pkg/config" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "daemon") - -// CreateSpecRoleAllConfigMap create configmap of role to store all config need by start role server. -func (c *Cluster) CreateSpecRoleAllConfigMap(role, configMapName string) error { - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: c.Namespace, - }, - Data: map[string]string{ - "role": role, - }, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return err - } - - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil { - return errors.Wrapf(err, "failed to create configmap %q", configMapName) - } - - return nil -} - -// updateSpecRoleAllConfigMap update configmap of role to store all config need by start role server. 
-func (c *Cluster) UpdateSpecRoleAllConfigMap(configMapName, configMapDataKey, configMapDataVal string, conf config.ConfigInterface) error { - var value string - if configMapDataVal != "" || len(configMapDataVal) != 0 { - value = configMapDataVal - } else { - // get curve-conf-default configmap from cluster - defaultConfigMap, err := c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(config.DefaultConfigMapName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - if kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - } - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - } - - defaultDataVal := defaultConfigMap.Data[configMapDataKey] - // replace ${} to specific parameters - value, err = config.ReplaceConfigVars(defaultDataVal, conf) - if err != nil { - return err - } - } - - // update the data of configmap 'chunkserver-all-config' or snapshot-all-config - cm, err := c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(configMapName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to get configmap [ %s ] from cluster", configMapName) - if kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", configMapName) - } - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", configMapName) - } - data := cm.Data - data[configMapDataKey] = value - - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Update(cm) - if err != nil { - return errors.Wrapf(err, "failed to update configmap %q", configMapName) - } - logger.Infof("add key %q to configmap %q successed", configMapDataKey, configMapName) - - return nil -} - -// createConfigMap create each configmap -func (c *Cluster) CreateEachConfigMap(configMapDataKey string, conf config.ConfigInterface, currentConfigMapName 
string) error { - // get curve-conf-default configmap from cluster - defaultConfigMap, err := c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Get(config.DefaultConfigMapName, metav1.GetOptions{}) - if err != nil { - logger.Errorf("failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - if kerrors.IsNotFound(err) { - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - } - return errors.Wrapf(err, "failed to get configmap [ %s ] from cluster", config.DefaultConfigMapName) - } - - // get configmap data - configData := defaultConfigMap.Data[configMapDataKey] - // replace ${} to specific parameters - replacedData, err := config.ReplaceConfigVars(configData, conf) - if err != nil { - return err - } - - // create curve-(role)-conf-[a,b,...] configmap for each one deployment - configMapData := map[string]string{ - configMapDataKey: replacedData, - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentConfigMapName, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err = c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return err - } - - // create configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create configmap %s", currentConfigMapName) - } - - logger.Infof("create configmap %q successed", currentConfigMapName) - - return nil -} diff --git a/pkg/daemon/labels.go b/pkg/daemon/labels.go deleted file mode 100644 index fd9b4e3b..00000000 --- a/pkg/daemon/labels.go +++ /dev/null @@ -1,32 +0,0 @@ -package daemon - -const ( - AppAttr = "app" - ClusterAttr = "curve_cluster" - daemonTypeLabel = "curve_daemon_type" - DaemonIDLabel = "ceph_daemon_id" - ResourceKind = "kind" -) - -// AppLabels returns labels common for all Rook-Ceph applications which may be useful for admins. 
-// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc. -func AppLabels(appName, namespace string) map[string]string { - return map[string]string{ - AppAttr: appName, - ClusterAttr: namespace, - } -} - -// CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins. -// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc -// Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw" -// Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c" -// ResourceKind is the CR type: "CephCluster", "CephFilesystem", etc -func CephDaemonAppLabels(appName, namespace, daemonType, daemonID, resourceKind string) map[string]string { - labels := AppLabels(appName, namespace) - labels[daemonTypeLabel] = daemonType - labels[DaemonIDLabel] = daemonID - labels[daemonType] = daemonID - labels[ResourceKind] = resourceKind - return labels -} diff --git a/pkg/daemon/node_info.go b/pkg/daemon/node_info.go deleted file mode 100644 index 77d0b186..00000000 --- a/pkg/daemon/node_info.go +++ /dev/null @@ -1,84 +0,0 @@ -package daemon - -import "github.com/opencurve/curve-operator/pkg/k8sutil" - -type NodeInfo struct { - NodeName string - NodeIP string - HostID int - ReplicasSequence int - PeerPort int // etcd - ClientPort int // etcd - MdsPort int // mds - DummyPort int // mds - SnapshotClonePort int // snapshotclone - SnapshotCloneDummyPort int // snapshotclone - SnapshotCloneProxyPort int // snapshotclone - MetaserverPort int // metaserver - MetaserverExternalPort int // metaserver - StandAlone bool -} - -func ConfigureNodeInfo(c *Cluster) ([]NodeInfo, error) { - nodeNameIP, err := k8sutil.GetNodeInfoMap(c.Nodes, c.Context.Clientset) - if err != nil { - return nil, err - } - - var ( - peerPort, clientPort int - mdsPort, dummyPort int - snapshotClonePort, snapshotCloneDummyPort, snapshotCloneProxyPort int - metaserverPort, metaserverExternalPort 
int - prevNodeName string - nodesInfo []NodeInfo - ) - hostID, replicasSequence := -1, -1 - standAlone := false - // The order of node has been determined - for _, node := range nodeNameIP { - hostID++ - if node.NodeName == prevNodeName { - standAlone = true - replicasSequence++ - peerPort++ - clientPort++ - mdsPort++ - dummyPort++ - snapshotClonePort++ - snapshotCloneDummyPort++ - snapshotCloneProxyPort++ - metaserverPort++ - metaserverExternalPort++ - } else { - replicasSequence = 0 - peerPort = c.Etcd.PeerPort - clientPort = c.Etcd.ClientPort - mdsPort = c.Mds.Port - dummyPort = c.Mds.DummyPort - snapshotClonePort = c.SnapShotClone.Port - snapshotCloneDummyPort = c.SnapShotClone.DummyPort - snapshotCloneProxyPort = c.SnapShotClone.ProxyPort - metaserverPort = c.Metaserver.Port - metaserverExternalPort = c.Metaserver.ExternalPort - } - prevNodeName = node.NodeName - nodesInfo = append(nodesInfo, NodeInfo{ - NodeName: node.NodeName, - NodeIP: node.NodeIP, - HostID: hostID, - ReplicasSequence: replicasSequence, - PeerPort: peerPort, - ClientPort: clientPort, - MdsPort: mdsPort, - DummyPort: dummyPort, - SnapshotClonePort: snapshotClonePort, - SnapshotCloneDummyPort: snapshotCloneDummyPort, - SnapshotCloneProxyPort: snapshotCloneProxyPort, - MetaserverPort: metaserverPort, - MetaserverExternalPort: metaserverExternalPort, - StandAlone: standAlone, - }) - } - return nodesInfo, nil -} diff --git a/pkg/daemon/volume.go b/pkg/daemon/volume.go deleted file mode 100644 index 044ce28d..00000000 --- a/pkg/daemon/volume.go +++ /dev/null @@ -1,87 +0,0 @@ -package daemon - -import ( - "strings" - - v1 "k8s.io/api/core/v1" - - "github.com/opencurve/curve-operator/pkg/config" -) - -// DaemonVolumes returns the pod volumes used by all Curve daemons. 
-func DaemonVolumes(configMapDataKey string, configMapMountPathDir string, dataPaths *config.DataPathMap, curConfigMapName string) []v1.Volume { - // create configmap volume - vols := []v1.Volume{} - if curConfigMapName != "" { - configVol, _ := configConfigMapVolumeAndMount(configMapDataKey, configMapMountPathDir, curConfigMapName) - vols = append(vols, configVol) - } - - // create Data hostpath volume and log hostpath volume - hostPathType := v1.HostPathDirectoryOrCreate - if dataPaths != nil && dataPaths.HostDataDir != "" { - src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: "data-volume", VolumeSource: src}) - } - - if dataPaths != nil && dataPaths.HostLogDir != "" { - src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostLogDir, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: "log-volume", VolumeSource: src}) - } - - return vols -} - -// DaemonVolumeMounts returns the pod container volumeMounts used by Curve daemon -func DaemonVolumeMounts(configMapDataKey string, configMapMountPathDir string, dataPaths *config.DataPathMap, curConfigMapName string) []v1.VolumeMount { - // create configmap mount path - mounts := []v1.VolumeMount{} - if curConfigMapName != "" { - _, configMapMount := configConfigMapVolumeAndMount(configMapDataKey, configMapMountPathDir, curConfigMapName) - mounts = append(mounts, configMapMount) - } - - // create data mount path and log mount path on container - if dataPaths != nil && dataPaths.ContainerDataDir != "" { - mounts = append(mounts, v1.VolumeMount{Name: "data-volume", MountPath: dataPaths.ContainerDataDir}) - } - - if dataPaths != nil && dataPaths.ContainerLogDir != "" { - mounts = append(mounts, v1.VolumeMount{Name: "log-volume", MountPath: dataPaths.ContainerLogDir}) - } - - return mounts -} - -// configConfigMapVolumeAndMount Create configmap volume and volume mount for daemon pod -func 
configConfigMapVolumeAndMount(configMapDataKey string, configMapMountPathDir string, curConfigMapName string) (v1.Volume, v1.VolumeMount) { - configMapVolSource := &v1.ConfigMapVolumeSource{} - mode := int32(0644) - if configMapDataKey == "" { - configMapVolSource = &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: curConfigMapName}, - } - } else { - configMapVolSource = &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: curConfigMapName}, - Items: []v1.KeyToPath{{Key: configMapDataKey, Path: configMapDataKey, Mode: &mode}}, - } - } - - volumeName := curConfigMapName + strings.Split(configMapDataKey, ".")[0] - configVol := v1.Volume{ - Name: volumeName, - VolumeSource: v1.VolumeSource{ - ConfigMap: configMapVolSource, - }, - } - - // configmap volume mount path - m := v1.VolumeMount{ - Name: volumeName, - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: configMapMountPathDir, - } - - return configVol, m -} diff --git a/pkg/etcd/config.go b/pkg/etcd/config.go deleted file mode 100644 index 5c29e227..00000000 --- a/pkg/etcd/config.go +++ /dev/null @@ -1,53 +0,0 @@ -package etcd - -import "github.com/opencurve/curve-operator/pkg/config" - -// etcdConfig implements config.ConfigInterface -var _ config.ConfigInterface = &etcdConfig{} - -// etcdConfig for a single etcd -type etcdConfig struct { - Prefix string - ServiceRole string - ServiceHostSequence string - ServiceReplicaSequence string - ServiceAddr string - ServicePort string - ServiceClientPort string - ClusterEtcdHttpAddr string - - ResourceName string - CurrentConfigMapName string - DaemonID string - ConfigMapMountPath string - DataPathMap *config.DataPathMap -} - -func (c *etcdConfig) GetPrefix() string { return c.Prefix } -func (c *etcdConfig) GetServiceId() string { return "" } -func (c *etcdConfig) GetServiceRole() string { return c.ServiceRole } -func (c *etcdConfig) GetServiceHost() string { 
return "" } -func (c *etcdConfig) GetServiceHostSequence() string { return c.ServiceHostSequence } -func (c *etcdConfig) GetServiceReplicaSequence() string { return c.ServiceReplicaSequence } -func (c *etcdConfig) GetServiceReplicasSequence() string { return "" } -func (c *etcdConfig) GetServiceAddr() string { return c.ServiceAddr } -func (c *etcdConfig) GetServicePort() string { return c.ServicePort } -func (c *etcdConfig) GetServiceClientPort() string { return c.ServiceClientPort } -func (c *etcdConfig) GetServiceDummyPort() string { return "" } -func (c *etcdConfig) GetServiceProxyPort() string { return "" } -func (c *etcdConfig) GetServiceExternalAddr() string { return "" } -func (c *etcdConfig) GetServiceExternalPort() string { return "" } -func (c *etcdConfig) GetLogDir() string { return "" } -func (c *etcdConfig) GetDataDir() string { return "" } - -func (c *etcdConfig) GetClusterEtcdHttpAddr() string { return c.ClusterEtcdHttpAddr } -func (c *etcdConfig) GetClusterEtcdAddr() string { return "" } -func (c *etcdConfig) GetClusterMdsAddr() string { return "" } -func (c *etcdConfig) GetClusterMdsDummyAddr() string { return "" } -func (c *etcdConfig) GetClusterMdsDummyPort() string { return "" } -func (c *etcdConfig) GetClusterChunkserverAddr() string { return "" } -func (c *etcdConfig) GetClusterMetaserverAddr() string { return "" } -func (c *etcdConfig) GetClusterSnapshotcloneAddr() string { return "" } -func (c *etcdConfig) GetClusterSnapshotcloneProxyAddr() string { return "" } -func (c *etcdConfig) GetClusterSnapshotcloneDummyPort() string { return "" } -func (c *etcdConfig) GetClusterSnapshotcloneNginxUpstream() string { return "" } diff --git a/pkg/etcd/etcd.go b/pkg/etcd/etcd.go deleted file mode 100644 index 5720bfaf..00000000 --- a/pkg/etcd/etcd.go +++ /dev/null @@ -1,158 +0,0 @@ -package etcd - -import ( - "context" - "fmt" - "path" - "strconv" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - 
kerrors "k8s.io/apimachinery/pkg/api/errors" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - AppName = "curve-etcd" - ConfigMapNamePrefix = "curve-etcd-conf" - - // ContainerPath is the mount path of data and log - Prefix = "/curvebs/etcd" - ContainerDataDir = "/curvebs/etcd/data" - ContainerLogDir = "/curvebs/etcd/logs" - - FSPrefix = "/curvefs/etcd" - FSContainerDataDir = "/curvefs/etcd/data" - FSContainerLogDir = "/curvefs/etcd/logs" -) - -type Cluster struct { - *daemon.Cluster -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "etcd") - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -// Start begins the process of running a cluster of curve etcds. -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo) ([]*topology.DeployConfig, error) { - var etcdEndpoints, clusterEtcdAddr, initialCluster string - for _, node := range nodesInfo { - etcdEndpoints = fmt.Sprint(etcdEndpoints, node.NodeIP, ":", node.PeerPort, ",") - clusterEtcdAddr = fmt.Sprint(clusterEtcdAddr, node.NodeIP, ":", node.ClientPort, ",") - initialCluster = fmt.Sprint(initialCluster, "etcd", strconv.Itoa(node.HostID), strconv.Itoa(node.ReplicasSequence), "=http://", node.NodeIP, ":", node.PeerPort, ",") - } - etcdEndpoints = strings.TrimRight(etcdEndpoints, ",") - clusterEtcdAddr = strings.TrimRight(clusterEtcdAddr, ",") - initialCluster = strings.TrimRight(initialCluster, ",") - logger.Infof("initialCluster %v", initialCluster) - - // Create etcd override configmap - if err := c.createOverrideConfigMap(etcdEndpoints, clusterEtcdAddr); err != nil { - return nil, err - } - - // create ConfigMap and referred Deployment by travel all nodes that have been labeled - "app=etcd" - var configMapMountPath, prefix, 
containerDataDir, containerLogDir string - if c.Kind == config.KIND_CURVEBS { - prefix = Prefix - containerDataDir = ContainerDataDir - containerLogDir = ContainerLogDir - configMapMountPath = config.EtcdConfigMapMountPathDir - } else { - prefix = FSPrefix - containerDataDir = FSContainerDataDir - containerLogDir = FSContainerLogDir - configMapMountPath = config.FSEtcdConfigMapMountPathDir - } - - var deploymentsToWaitFor []*appsv1.Deployment - var dcs []*topology.DeployConfig - - for _, node := range nodesInfo { - daemonIDString := k8sutil.IndexToName(node.HostID) - // Construct etcd config to pass to make deployment - resourceName := fmt.Sprintf("%s-%s", AppName, daemonIDString) - currentConfigMapName := fmt.Sprintf("%s-%s", ConfigMapNamePrefix, daemonIDString) - etcdConfig := &etcdConfig{ - Prefix: prefix, - ServiceRole: config.ROLE_ETCD, - ServiceHostSequence: strconv.Itoa(node.HostID), - ServiceReplicaSequence: strconv.Itoa(node.ReplicasSequence), - ServiceAddr: node.NodeIP, - ServicePort: strconv.Itoa(node.PeerPort), - ServiceClientPort: strconv.Itoa(node.ClientPort), - ClusterEtcdHttpAddr: initialCluster, - - DaemonID: daemonIDString, - CurrentConfigMapName: currentConfigMapName, - ResourceName: resourceName, - DataPathMap: config.NewDaemonDataPathMap( - path.Join(c.DataDirHostPath, fmt.Sprint("etcd-", daemonIDString)), - path.Join(c.LogDirHostPath, fmt.Sprint("etcd-", daemonIDString)), - containerDataDir, - containerLogDir, - ), - ConfigMapMountPath: configMapMountPath, - } - dc := &topology.DeployConfig{ - Kind: c.Kind, - Role: config.ROLE_ETCD, - NodeName: node.NodeName, - NodeIP: node.NodeIP, - Port: node.ClientPort, - ReplicasSequence: node.ReplicasSequence, - Replicas: len(c.Nodes), - StandAlone: node.StandAlone, - } - - dcs = append(dcs, dc) - - // create each etcd configmap for each deployment - err := c.CreateEachConfigMap(config.EtcdConfigMapDataKey, etcdConfig, currentConfigMapName) - if err != nil { - return nil, err - } - - // make etcd 
deployment - d, err := c.makeDeployment(node.NodeName, node.NodeIP, etcdConfig) - if err != nil { - return nil, err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return nil, errors.Wrapf(err, "failed to create etcd deployment %s", resourceName) - } - logger.Infof("deployment for etcd %s already exists. updating if needed", resourceName) - - // if err := k8sutil.UpdateDeploymentAndWait(context.TODO(), &c.Context, d, c.Namespace, nil); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return nil, err - } - } - - k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeEtcdReady, curvev1.ConditionTrue, curvev1.ConditionEtcdClusterCreatedReason, "Etcd cluster has been created") - - return dcs, nil -} diff --git a/pkg/etcd/spec.go b/pkg/etcd/spec.go deleted file mode 100644 index 4f4fba13..00000000 --- a/pkg/etcd/spec.go +++ /dev/null @@ -1,171 +0,0 @@ -package etcd - -import ( - "fmt" - "path" - "strconv" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/logrotate" -) - -// createOverrideConfigMap create configMap override to record the endpoints of etcd for mds use -func (c *Cluster) createOverrideConfigMap(etcdEndpoints string, 
clusterEtcdAddr string) error { - etcdConfigMapData := map[string]string{ - config.EtcdOvverideConfigMapDataKey: etcdEndpoints, - config.ClusterEtcdAddr: clusterEtcdAddr, - } - - // etcd-endpoints-override configmap only has one "etcdEndpoints" key that the value is etcd cluster endpoints - overrideCM := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.EtcdOverrideConfigMapName, - Namespace: c.NamespacedName.Namespace, - }, - Data: etcdConfigMapData, - } - err := c.OwnerInfo.SetControllerReference(overrideCM) - if err != nil { - return err - } - - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(overrideCM) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create override configmap %s", c.NamespacedName.Namespace) - } - logger.Infof("ConfigMap for override etcd endpoints %s already exists. updating if needed", config.EtcdOverrideConfigMapName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. 
%v", resourceName, err) - // } - } else { - logger.Infof("ConfigMap %s for override etcd endpoints has been created", config.EtcdOverrideConfigMapName) - } - - return nil -} - -// makeDeployment make etcd deployment to run etcd server -func (c *Cluster) makeDeployment(nodeName string, ip string, etcdConfig *etcdConfig) (*apps.Deployment, error) { - volumes := daemon.DaemonVolumes(config.EtcdConfigMapDataKey, etcdConfig.ConfigMapMountPath, etcdConfig.DataPathMap, etcdConfig.CurrentConfigMapName) - labels := daemon.CephDaemonAppLabels(AppName, c.Namespace, "etcd", etcdConfig.DaemonID, c.Kind) - - // add log config volume - logConfCMVolSource := &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "log-conf"}} - volumes = append(volumes, v1.Volume{Name: "log-conf", VolumeSource: v1.VolumeSource{ConfigMap: logConfCMVolSource}}) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: etcdConfig.ResourceName, - Labels: labels, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - c.makeChmodDirInitContainer(etcdConfig), - }, - Containers: []v1.Container{ - c.makeEtcdDaemonContainer(nodeName, ip, etcdConfig, etcdConfig.ClusterEtcdHttpAddr), - logrotate.MakeLogrotateContainer(), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: etcdConfig.ResourceName, - Namespace: c.NamespacedName.Namespace, - Labels: labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to etcd deployment %q", 
d.Name) - } - - return d, nil -} - -// makeChmodDirInitContainer make init container to chmod 700 of ContainerDataDir('/curvebs/etcd/data') -func (c *Cluster) makeChmodDirInitContainer(etcdConfig *etcdConfig) v1.Container { - container := v1.Container{ - Name: "chmod", - // Args: args, - Command: []string{"chmod", "700", etcdConfig.DataPathMap.ContainerDataDir}, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: daemon.DaemonVolumeMounts(config.EtcdConfigMapDataKey, etcdConfig.ConfigMapMountPath, etcdConfig.DataPathMap, etcdConfig.CurrentConfigMapName), - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - return container -} - -// makeEtcdDaemonContainer create etcd container -func (c *Cluster) makeEtcdDaemonContainer(nodeName string, ip string, etcdConfig *etcdConfig, init_cluster string) v1.Container { - clientPort, _ := strconv.Atoi(etcdConfig.ServiceClientPort) - peerPort, _ := strconv.Atoi(etcdConfig.ServicePort) - var commandLine string - if c.Kind == config.KIND_CURVEBS { - commandLine = "/curvebs/etcd/sbin/etcd" - } else { - commandLine = "/curvefs/etcd/sbin/etcd" - } - - configFileMountPath := path.Join(etcdConfig.ConfigMapMountPath, config.EtcdConfigMapDataKey) - argsConfigFileDir := fmt.Sprintf("--config-file=%s", configFileMountPath) - - container := v1.Container{ - Name: "etcd", - Command: []string{ - commandLine, - }, - Args: []string{ - argsConfigFileDir, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: daemon.DaemonVolumeMounts(config.EtcdConfigMapDataKey, etcdConfig.ConfigMapMountPath, etcdConfig.DataPathMap, etcdConfig.CurrentConfigMapName), - Ports: []v1.ContainerPort{ - { - Name: "listen-port", - ContainerPort: int32(clientPort), - HostPort: int32(clientPort), - Protocol: v1.ProtocolTCP, - }, - { - Name: "peer-port", - ContainerPort: int32(peerPort), - HostPort: int32(peerPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: 
[]v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - return container -} diff --git a/pkg/k8sutil/condition.go b/pkg/k8sutil/condition.go index 32bf0279..871b75bf 100644 --- a/pkg/k8sutil/condition.go +++ b/pkg/k8sutil/condition.go @@ -4,64 +4,39 @@ import ( "context" "time" - "github.com/pkg/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" curvev1 "github.com/opencurve/curve-operator/api/v1" "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/config" ) -func UpdateStatusCondition(kind string, ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType curvev1.ConditionType, status curvev1.ConditionStatus, reason curvev1.ConditionReason, message string) error { - if kind == config.KIND_CURVEBS { - err := UpdateCondition(ctx, c, namespaceName, conditionType, status, reason, message) - if err != nil { - return err - } - } else { - err := UpdateFSCondition(ctx, c, namespaceName, conditionType, status, reason, message) - if err != nil { - return err - } - } - return nil -} - -// UpdateCondition function will export each condition into the BS cluster custom resource -func UpdateCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType curvev1.ConditionType, status curvev1.ConditionStatus, reason curvev1.ConditionReason, message string) error { - return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { +// UpdateCondition function will export each condition into the cluster custom resource +func UpdateCondition(ctx context.Context, client client.Client, kind string, namespacedName types.NamespacedName, phase curvev1.ClusterPhase, condition curvev1.ClusterCondition) { + // use client.Client unit test this more easily with updating statuses which 
must use the client + switch kind { + case clusterd.KIND_CURVEBS: cluster := &curvev1.CurveCluster{} - if err := c.Client.Get(ctx, namespaceName, cluster); err != nil { - logger.Errorf("failed to get cluster %v to update the conditions. %v", namespaceName, err) - return err + if err := client.Get(ctx, namespacedName, cluster); err != nil { + logger.Errorf("failed to get cluster %v to update the conditions. %v", namespacedName, err) + return } - - return UpdateClusterCondition(c, cluster, namespaceName, conditionType, status, reason, message, false) - }) -} - -// UpdateFSCondition function will export each condition into the FS cluster custom resource -func UpdateFSCondition(ctx context.Context, c *clusterd.Context, namespaceName types.NamespacedName, conditionType curvev1.ConditionType, status curvev1.ConditionStatus, reason curvev1.ConditionReason, message string) error { - return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) { + UpdateBsClusterCondition(client, cluster, phase, condition) + case clusterd.KIND_CURVEFS: cluster := &curvev1.Curvefs{} - if err := c.Client.Get(ctx, namespaceName, cluster); err != nil { - logger.Errorf("failed to get cluster %v to update the conditions. %v", namespaceName, err) - return err + if err := client.Get(ctx, namespacedName, cluster); err != nil { + logger.Errorf("failed to get cluster %v to update the conditions. 
%v", namespacedName, err) + return } - - return UpdateFSClusterCondition(c, cluster, namespaceName, conditionType, status, reason, message, false) - }) + UpdateFsClusterCondition(client, cluster, phase, condition) + default: + logger.Errorf("Unknown cluster kind %q", kind) + } } -// UpdateFSClusterCondition function will export each condition into the cluster custom resource -func UpdateFSClusterCondition(c *clusterd.Context, cluster *curvev1.Curvefs, namespaceName types.NamespacedName, conditionType curvev1.ConditionType, status curvev1.ConditionStatus, - reason curvev1.ConditionReason, message string, preserveAllConditions bool) error { - +// UpdateFsClusterCondition function will export each condition into the cluster custom resource +func UpdateFsClusterCondition(client client.Client, cluster *curvev1.Curvefs, phase curvev1.ClusterPhase, newCondition curvev1.ClusterCondition) { // Keep the conditions that already existed if they are in the list of long-term conditions, // otherwise discard the temporary conditions var currentCondition *curvev1.ClusterCondition @@ -69,64 +44,52 @@ func UpdateFSClusterCondition(c *clusterd.Context, cluster *curvev1.Curvefs, nam for _, condition := range cluster.Status.Conditions { // Only keep conditions in the list if it's a persisted condition such as the cluster creation being completed. // The transient conditions are not persisted. However, if the currently requested condition is not expected to - // reset the transient conditions, they are retained. For example, if the operator is checking for curve health + // reset the transient conditions, they are retained. For example, if the operator is checking for ceph health // in the middle of the reconcile, the progress condition should not be reset by the status check update. 
- if preserveAllConditions || - condition.Type == curvev1.ConditionTypeEtcdReady || - condition.Type == curvev1.ConditionTypeMdsReady || - condition.Type == curvev1.ConditionTypeMetaServerReady || - condition.Type == curvev1.ConditionTypeSnapShotCloneReady { - if conditionType != condition.Type { - conditions = append(conditions, condition) + if condition.Type == curvev1.ConditionClusterReady { + if newCondition.Type != condition.Type { + conditions = append(conditions, newCondition) continue } - // Update the existing condition with the new status currentCondition = condition.DeepCopy() - if currentCondition.Status != status || currentCondition.Message != message { + if currentCondition.Status != newCondition.Status || currentCondition.Message != newCondition.Message { // Update the last transition time since the status changed currentCondition.LastTransitionTime = metav1.NewTime(time.Now()) } - currentCondition.Status = status - currentCondition.Reason = reason - currentCondition.Message = message + currentCondition.Status = newCondition.Status + currentCondition.Reason = newCondition.Reason + currentCondition.Message = newCondition.Message } } - - // Create a new condition since not found in the existing conditions if currentCondition == nil { + // Create a new condition since not found in the existing conditions currentCondition = &curvev1.ClusterCondition{ - Type: conditionType, - Status: status, - Reason: reason, - Message: message, + Type: newCondition.Type, + Status: newCondition.Status, + Reason: newCondition.Reason, + Message: newCondition.Message, LastTransitionTime: metav1.NewTime(time.Now()), } } - conditions = append(conditions, *currentCondition) cluster.Status.Conditions = conditions // Once the cluster begins deleting, the phase should not revert back to any other phase - if cluster.Status.Phase != curvev1.ClusterPhaseDeleting { - cluster.Status.Phase = translateConditionType2Phase(conditionType) + if string(cluster.Status.Phase) != 
string(curvev1.ConditionDeleting) { + cluster.Status.Phase = phase cluster.Status.Message = currentCondition.Message - cluster.Status.CurveVersion.Image = cluster.Spec.CurveVersion.Image - logger.Debugf("CurveCluster %q status: %q. %q", namespaceName.Namespace, cluster.Status.Phase, cluster.Status.Message) + logger.Debugf("CurveFsCluster %q status: %q. %q", cluster.GetName(), cluster.Status.Phase, cluster.Status.Message) } - if err := UpdateStatus(c.Client, namespaceName, cluster); err != nil { + if err := client.Status().Update(context.TODO(), cluster); err != nil { logger.Errorf("failed to update cluster condition to %+v. %v", *currentCondition, err) - return err - } - return nil + } } -// UpdateClusterCondition function will export each condition into the cluster custom resource -func UpdateClusterCondition(c *clusterd.Context, cluster *curvev1.CurveCluster, namespaceName types.NamespacedName, conditionType curvev1.ConditionType, status curvev1.ConditionStatus, - reason curvev1.ConditionReason, message string, preserveAllConditions bool) error { - +// UpdateBsClusterCondition function will export each condition into the cluster custom resource +func UpdateBsClusterCondition(client client.Client, cluster *curvev1.CurveCluster, phase curvev1.ClusterPhase, newCondition curvev1.ClusterCondition) { // Keep the conditions that already existed if they are in the list of long-term conditions, // otherwise discard the temporary conditions var currentCondition *curvev1.ClusterCondition @@ -134,89 +97,46 @@ func UpdateClusterCondition(c *clusterd.Context, cluster *curvev1.CurveCluster, for _, condition := range cluster.Status.Conditions { // Only keep conditions in the list if it's a persisted condition such as the cluster creation being completed. // The transient conditions are not persisted. However, if the currently requested condition is not expected to - // reset the transient conditions, they are retained. 
For example, if the operator is checking for curve health + // reset the transient conditions, they are retained. For example, if the operator is checking for ceph health // in the middle of the reconcile, the progress condition should not be reset by the status check update. - if preserveAllConditions || - condition.Type == curvev1.ConditionTypeEtcdReady || - condition.Type == curvev1.ConditionTypeMdsReady || - condition.Type == curvev1.ConditionTypeFormatedReady || - condition.Type == curvev1.ConditionTypeChunkServerReady || - condition.Type == curvev1.ConditionTypeSnapShotCloneReady { - if conditionType != condition.Type { - conditions = append(conditions, condition) + if condition.Type == curvev1.ConditionClusterReady { + if newCondition.Type != condition.Type { + conditions = append(conditions, newCondition) continue } - // Update the existing condition with the new status currentCondition = condition.DeepCopy() - if currentCondition.Status != status || currentCondition.Message != message { + if currentCondition.Status != newCondition.Status || currentCondition.Message != newCondition.Message { // Update the last transition time since the status changed currentCondition.LastTransitionTime = metav1.NewTime(time.Now()) } - currentCondition.Status = status - currentCondition.Reason = reason - currentCondition.Message = message + currentCondition.Status = newCondition.Status + currentCondition.Reason = newCondition.Reason + currentCondition.Message = newCondition.Message } } - - // Create a new condition since not found in the existing conditions if currentCondition == nil { + // Create a new condition since not found in the existing conditions currentCondition = &curvev1.ClusterCondition{ - Type: conditionType, - Status: status, - Reason: reason, - Message: message, + Type: newCondition.Type, + Status: newCondition.Status, + Reason: newCondition.Reason, + Message: newCondition.Message, LastTransitionTime: metav1.NewTime(time.Now()), } } - conditions = 
append(conditions, *currentCondition) cluster.Status.Conditions = conditions // Once the cluster begins deleting, the phase should not revert back to any other phase - if cluster.Status.Phase != curvev1.ClusterPhaseDeleting { - cluster.Status.Phase = translateConditionType2Phase(conditionType) + if string(cluster.Status.Phase) != string(curvev1.ConditionDeleting) { + cluster.Status.Phase = phase cluster.Status.Message = currentCondition.Message - cluster.Status.CurveVersion.Image = cluster.Spec.CurveVersion.Image - logger.Debugf("CurveCluster %q status: %q. %q", namespaceName.Namespace, cluster.Status.Phase, cluster.Status.Message) + logger.Debugf("CurveBsCluster %q status: %q. %q", cluster.GetName(), cluster.Status.Phase, cluster.Status.Message) } - if err := UpdateStatus(c.Client, namespaceName, cluster); err != nil { + if err := client.Status().Update(context.TODO(), cluster); err != nil { logger.Errorf("failed to update cluster condition to %+v. %v", *currentCondition, err) - return err - } - return nil -} -func translateConditionType2Phase(conditionType curvev1.ConditionType) curvev1.ConditionType { - if conditionType == curvev1.ConditionTypeEtcdReady || - conditionType == curvev1.ConditionTypeMdsReady || - conditionType == curvev1.ConditionTypeFormatedReady || - conditionType == curvev1.ConditionTypeChunkServerReady || - conditionType == curvev1.ConditionTypeSnapShotCloneReady { - return curvev1.ClusterPhasePending } - return conditionType -} - -// UpdateStatus updates an object with a given status. The object is updated with the latest version -// from the server on a successful update. 
-func UpdateStatus(client client.Client, namespaceName types.NamespacedName, obj runtime.Object) error { - nsName := types.NamespacedName{ - Namespace: namespaceName.Namespace, - Name: namespaceName.Name, - } - - // Try to update the status - err := client.Status().Update(context.Background(), obj) - // If the object doesn't exist yet, we need to initialize it - if kerrors.IsNotFound(err) { - err = client.Update(context.Background(), obj) - } - - if err != nil { - return errors.Wrapf(err, "failed to update object %q status", nsName.String()) - } - - return nil } diff --git a/pkg/k8sutil/configmap.go b/pkg/k8sutil/configmap.go new file mode 100644 index 00000000..be9a2fe5 --- /dev/null +++ b/pkg/k8sutil/configmap.go @@ -0,0 +1,83 @@ +package k8sutil + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// IsConfigMapExist check the ConfigMap if exist in specified namespace +func IsConfigMapExist(clientset kubernetes.Interface, c *corev1.ConfigMap) (bool, error) { + _, err := clientset.CoreV1().ConfigMaps(c.Namespace).Get(c.Name, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, errors.Wrapf(err, "failed to check whether ConfigMap %s is exist", c.Name) + } + return true, nil +} + +// GetConfigMap get configmap in specified namespace +func GetConfigMapByName(clientset kubernetes.Interface, namespace, name string) (*corev1.ConfigMap, error) { + existConfigMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + return existConfigMap, err +} + +// GetConfigMap get configmap in specified namespace +func GetConfigMap(clientset kubernetes.Interface, c *corev1.ConfigMap) (*corev1.ConfigMap, error) { + existConfigMap, err := clientset.CoreV1().ConfigMaps(c.Namespace).Get(c.Name, metav1.GetOptions{}) + return existConfigMap, err +} + +// 
CreateNewConfigMap create a new ConfigMap in specified namespace +func CreateNewConfigMap(clientset kubernetes.Interface, c *corev1.ConfigMap) error { + _, err := clientset.CoreV1().ConfigMaps(c.Namespace).Create(c) + if err != nil { + return errors.Wrapf(err, "failed to create ConfigMap %s in namespace %s", c.Name, c.Namespace) + } + return nil +} + +// DeleteConfigMap delete a ConfigMap in specified namespace +func DeleteConfigMap(clientset kubernetes.Interface, c *corev1.ConfigMap) error { + err := clientset.CoreV1().ConfigMaps(c.Namespace).Delete(c.Name, &metav1.DeleteOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to delete ConfigMap %s in namespace %s", c.Name, c.Namespace) + } + + return nil +} + +// UpdateDeploymentAndWaitStart update a ConfigMap in specified namespace +func UpdateConfigMap(clientset kubernetes.Interface, c *corev1.ConfigMap) (*corev1.ConfigMap, error) { + updatedConfigMap, err := clientset.CoreV1().ConfigMaps(c.Namespace).Update(c) + if err != nil { + return nil, errors.Wrapf(err, "failed to update ConfigMap %s in namespace %s", c.Name, c.Namespace) + } + + return updatedConfigMap, nil +} + +// CreateOrUpdate create ConfigMap if not exist or update the ConfigMap. 
+func CreateOrUpdateConfigMap(clientset kubernetes.Interface, c *corev1.ConfigMap) (*corev1.ConfigMap, error) { + isExist, err := IsConfigMapExist(clientset, c) + if err != nil { + return nil, err + } + + if !isExist { + err = CreateNewConfigMap(clientset, c) + if err != nil { + return nil, err + } + return nil, nil + } + updateConfigMap, err := UpdateConfigMap(clientset, c) + if err != nil { + return nil, err + } + return updateConfigMap, nil +} diff --git a/pkg/k8sutil/deployment.go b/pkg/k8sutil/deployment.go index 97204726..c0d6ef23 100644 --- a/pkg/k8sutil/deployment.go +++ b/pkg/k8sutil/deployment.go @@ -1,88 +1,99 @@ package k8sutil import ( - "context" "fmt" "time" + "emperror.dev/errors" appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - - "github.com/opencurve/curve-operator/pkg/clusterd" - "github.com/opencurve/curve-operator/pkg/k8sutil/patch" + "k8s.io/client-go/kubernetes" ) -// UpdateDeploymentAndWait updates a deployment and waits until it is running to return. It will -// error if the deployment does not exist to be updated or if it takes too long. -// This method has a generic callback function that each backend can rely on -// It serves two purposes: -// 1. verify that a resource can be stopped -// 2. verify that we can continue the update procedure -// -// Basically, we go one resource by one and check if we can stop and then if the resource has been successfully updated -// we check if we can go ahead and move to the next one. 
-func UpdateDeploymentAndWait(ctx context.Context, clusterContext *clusterd.Context, modifiedDeployment *appsv1.Deployment, namespace string, verifyCallback func(action string) error) error { - currentDeployment, err := clusterContext.Clientset.AppsV1().Deployments(namespace).Get(modifiedDeployment.Name, metav1.GetOptions{}) +// IsDeploymentExist check the Deployment if exist in specified namespace +func IsDeploymentExist(clientset kubernetes.Interface, d *appsv1.Deployment) (bool, error) { + _, err := clientset.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to get deployment %s. %+v", modifiedDeployment.Name, err) + if apierrors.IsNotFound(err) { + return false, nil + } + return false, errors.Wrapf(err, "failed to check whether Deployment %s is exist", d.Name) } + return true, nil +} - // Check whether the current deployment and newly generated one are identical - patchChanged := false - patchResult, err := patch.DefaultPatchMaker.Calculate(currentDeployment, modifiedDeployment) +// GetDeployment get a Deployment in specified namespace +func GetDeployment(clientset kubernetes.Interface, d *appsv1.Deployment) error { + err := clientset.AppsV1().Deployments(d.Namespace).Delete(d.Name, &metav1.DeleteOptions{}) if err != nil { - logger.Warningf("failed to calculate diff between current deployment %q and newly generated one. Assuming it changed. 
%v", currentDeployment.Name, err) - patchChanged = true - } else if !patchResult.IsEmpty() { - patchChanged = true + return errors.Wrapf(err, "failed to delete Deployment %s in namespace %s", d.Name, d.Namespace) } - if !patchChanged { - logger.Infof("deployment %q did not change, nothing to update", currentDeployment.Name) - return nil + return nil +} + +// CreateNewDeploymentAndWaitStart create a new Deployment in specified namespace and wait it to start up +func CreateNewDeploymentAndWaitStart(clientset kubernetes.Interface, d *appsv1.Deployment) error { + newDeploy, err := clientset.AppsV1().Deployments(d.Namespace).Create(d) + if err != nil { + return err } - // If deployments are different, let's update! - logger.Infof("updating deployment %q after verifying it is safe to stop", modifiedDeployment.Name) + if err := WaitForDeploymentToStart(clientset, newDeploy); err != nil { + return err + } - // Let's verify the deployment can be stopped - // if err := verifyCallback("stop"); err != nil { - // return fmt.Errorf("failed to check if deployment %q can be updated. %v", modifiedDeployment.Name, err) - // } + return nil +} - // Set hash annotation to the newly generated deployment - if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(modifiedDeployment); err != nil { - return fmt.Errorf("failed to set hash annotation on deployment %q. %v", modifiedDeployment.Name, err) +// UpdateDeploymentAndWaitStart update a Deployment and wait it to start +func UpdateDeploymentAndWaitStart(clientset kubernetes.Interface, d *appsv1.Deployment) (*appsv1.Deployment, error) { + updatedDeploy, err := clientset.AppsV1().Deployments(d.Namespace).Update(d) + if err != nil { + return nil, errors.Wrapf(err, "failed to update Deployment %s in namespace %s", d.Name, d.Namespace) } - if _, err := clusterContext.Clientset.AppsV1().Deployments(namespace).Update(modifiedDeployment); err != nil { - return fmt.Errorf("failed to update deployment %q. 
%v", modifiedDeployment.Name, err) + if err := WaitForDeploymentToStart(clientset, updatedDeploy); err != nil { + return nil, err } - if err := WaitForDeploymentToStart(ctx, clusterContext, currentDeployment); err != nil { - return err + return updatedDeploy, nil +} + +// CreateOrUpdateDeploymentAndWaitStart create Deployment if not exist or update deployment and wait it to start. +func CreateOrUpdateDeploymentAndWaitStart(clientset kubernetes.Interface, d *appsv1.Deployment) (*appsv1.Deployment, error) { + isExist, err := IsDeploymentExist(clientset, d) + if err != nil { + return nil, err } - // Now we check if we can go to the next daemon - // if err := verifyCallback("continue"); err != nil { - // return fmt.Errorf("failed to check if deployment %q can continue: %v", modifiedDeployment.Name, err) - // } - return nil + if !isExist { + err = CreateNewDeploymentAndWaitStart(clientset, d) + if err != nil { + return nil, err + } + return nil, nil + } + + newDeploy, err := UpdateDeploymentAndWaitStart(clientset, d) + if err != nil { + return nil, err + } + return newDeploy, nil } -func WaitForDeploymentToStart(ctx context.Context, clusterdContext *clusterd.Context, deployment *appsv1.Deployment) error { - // wait for the deployment to be restarted up to 300s(5min) +func WaitForDeploymentToStart(clientset kubernetes.Interface, d *appsv1.Deployment) error { sleepTime := 3 attempts := 100 for i := 0; i < attempts; i++ { - // check for the status of the deployment - d, err := clusterdContext.Clientset.AppsV1().Deployments(deployment.Namespace).Get(deployment.Name, metav1.GetOptions{}) + deploy, err := clientset.AppsV1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to get deployment %q. 
%v", deployment.Name, err) + return errors.Wrapf(err, "failed to get Deployment %s in namespace %s", d.Name, d.Namespace) } - if d.Status.ObservedGeneration >= deployment.Status.ObservedGeneration && d.Status.UpdatedReplicas > 0 && d.Status.ReadyReplicas > 0 { - logger.Infof("finished waiting for updated deployment %q", d.Name) + if deploy.Status.ObservedGeneration >= d.Status.ObservedGeneration && + deploy.Status.UpdatedReplicas > 0 && + deploy.Status.ReadyReplicas > 0 { return nil } @@ -90,32 +101,22 @@ func WaitForDeploymentToStart(ctx context.Context, clusterdContext *clusterd.Con // This can happen if one of the deployment cannot be scheduled on a node and stays in "pending" state for _, condition := range d.Status.Conditions { if condition.Type == appsv1.DeploymentProgressing && condition.Reason == "ProgressDeadlineExceeded" { - return fmt.Errorf("gave up waiting for deployment %q to update because %q", deployment.Name, condition.Reason) + return fmt.Errorf("gave up waiting for deployment %s to update because %s", d.Name, condition.Reason) } } - logger.Debugf("deployment %q status=%+v", d.Name, d.Status) - time.Sleep(time.Duration(sleepTime) * time.Second) } - return fmt.Errorf("gave up waiting for deployment %q to update", deployment.Name) -} -// DeleteSyncConfigDeployment delete the SyncConfigDeployment after the cluster is deployed. 
-func DeleteSyncConfigDeployment(ctx context.Context, clusterdContext *clusterd.Context, syncConfigDeployment, namespace string) error { - err := retry.OnError(retry.DefaultRetry, func(err error) bool { - // retrying for any error that occurs - return true - }, func() error { - return clusterdContext.Clientset.AppsV1().Deployments(namespace).Delete(syncConfigDeployment, &metav1.DeleteOptions{}) - }) + return fmt.Errorf("give up waiting for deployment %q to update", d.Name) +} +// DeleteDeployment delete a Deployment in specified namespace +func DeleteDeployment(clientset kubernetes.Interface, d *appsv1.Deployment) error { + err := clientset.AppsV1().Deployments(d.Namespace).Delete(d.Name, &metav1.DeleteOptions{}) if err != nil { - return fmt.Errorf("failed to delete deployment %q after the curve cluster has been deployed. %v", - syncConfigDeployment, err) + return errors.Wrapf(err, "failed to delete Deployment %s in namespace %s", d.Name, d.Namespace) } - logger.Infof("the curve cluster has been deployed and the deployment %q has been deleted", syncConfigDeployment) - return nil } diff --git a/pkg/controllers/finanlizer.go b/pkg/k8sutil/finializer.go similarity index 62% rename from pkg/controllers/finanlizer.go rename to pkg/k8sutil/finializer.go index f00ad039..bacdaf5e 100644 --- a/pkg/controllers/finanlizer.go +++ b/pkg/k8sutil/finializer.go @@ -1,4 +1,4 @@ -package controllers +package k8sutil import ( "context" @@ -6,60 +6,13 @@ import ( "strings" "github.com/pkg/errors" - kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - - curvev1 "github.com/opencurve/curve-operator/api/v1" ) -func removeFinalizer(cli client.Client, name types.NamespacedName, obj runtime.Object, finalizer string) error { - err := cli.Get(context.Background(), name, obj) - if err != nil { - if kerrors.IsNotFound(err) { - logger.Debugf("%s resource not 
found. Ignoring since object must be deleted.", name.Name) - return nil - } - return errors.Wrapf(err, "failed to retrieve %q to remove finalizer", name.Name) - } - - if finalizer == "" { - err = RemoveFinalizer(context.Background(), cli, obj, name) - if err != nil { - return err - } - } else { - err = RemoveFinalizerWithName(context.Background(), cli, obj, name, finalizer) - if err != nil { - return err - } - } - return nil -} - -// contains checks if an item exists in a given list. -func contains(list []string, s string) bool { - for _, v := range list { - if v == s { - return true - } - } - - return false -} - -// remove removes any element from a list -func remove(list []string, s string) []string { - for i, v := range list { - if v == s { - list = append(list[:i], list[i+1:]...) - } - } - - return list -} +const CustomResourceGroup = "curve.opencurve.io" // AddFinalizerIfNotPresent adds a finalizer an object to avoid instant deletion // of the object without finalizing it. @@ -74,7 +27,8 @@ func AddFinalizerIfNotPresent(ctx context.Context, cli client.Client, obj runtim logger.Infof("adding finalizer %q on %q", objectFinalizer, accessor.GetName()) accessor.SetFinalizers(append(accessor.GetFinalizers(), objectFinalizer)) // Update CR with finalizer - if err := cli.Update(ctx, obj); err != nil { + err = cli.Update(ctx, obj) + if err != nil { return errors.Wrapf(err, "failed to add finalizer %q on %q", objectFinalizer, accessor.GetName()) } } @@ -83,14 +37,14 @@ func AddFinalizerIfNotPresent(ctx context.Context, cli client.Client, obj runtim } // RemoveFinalizer removes a finalizer from an object -func RemoveFinalizer(ctx context.Context, cli client.Client, obj runtime.Object, namespacedName types.NamespacedName) error { +func RemoveFinalizer(ctx context.Context, client client.Client, namespacedName types.NamespacedName, obj runtime.Object) error { finalizerName := buildFinalizerName(obj.GetObjectKind().GroupVersionKind().Kind) - return 
RemoveFinalizerWithName(ctx, cli, obj, namespacedName, finalizerName) + return RemoveFinalizerWithName(ctx, client, namespacedName, obj, finalizerName) } // RemoveFinalizerWithName removes finalizer passed as an argument from an object -func RemoveFinalizerWithName(ctx context.Context, cli client.Client, obj runtime.Object, namespacedName types.NamespacedName, finalizerName string) error { - err := cli.Get(ctx, namespacedName, obj) +func RemoveFinalizerWithName(ctx context.Context, client client.Client, namespacedName types.NamespacedName, obj runtime.Object, finalizerName string) error { + err := client.Get(ctx, namespacedName, obj) if err != nil { return errors.Wrap(err, "failed to get the latest version of the object") } @@ -102,7 +56,7 @@ func RemoveFinalizerWithName(ctx context.Context, cli client.Client, obj runtime if contains(accessor.GetFinalizers(), finalizerName) { logger.Infof("removing finalizer %q on %q", finalizerName, accessor.GetName()) accessor.SetFinalizers(remove(accessor.GetFinalizers(), finalizerName)) - if err := cli.Update(ctx, obj); err != nil { + if err := client.Update(ctx, obj); err != nil { return errors.Wrapf(err, "failed to remove finalizer %q on %q", finalizerName, accessor.GetName()) } } @@ -112,5 +66,27 @@ func RemoveFinalizerWithName(ctx context.Context, cli client.Client, obj runtime // buildFinalizerName returns the finalizer name func buildFinalizerName(kind string) string { - return fmt.Sprintf("%s.%s", strings.ToLower(kind), curvev1.CustomResourceGroup) + return fmt.Sprintf("%s.%s", strings.ToLower(kind), CustomResourceGroup) +} + +// contains checks if an item exists in a given list. +func contains(list []string, s string) bool { + for _, v := range list { + if v == s { + return true + } + } + + return false +} + +// remove removes any element from a list +func remove(list []string, s string) []string { + for i, v := range list { + if v == s { + list = append(list[:i], list[i+1:]...) 
+ } + } + + return list } diff --git a/pkg/k8sutil/job.go b/pkg/k8sutil/job.go index 3a007ed9..76a7f9f3 100644 --- a/pkg/k8sutil/job.go +++ b/pkg/k8sutil/job.go @@ -1,7 +1,6 @@ package k8sutil import ( - "context" "fmt" "time" @@ -16,12 +15,11 @@ import ( // another call to this function with the same job name. For example, if a storage operator is // restarted/updated before the job can complete, the operator's next run of the job should replace // the previous job if deleteIfFound is set to true. -func RunReplaceableJob(ctx context.Context, clientset kubernetes.Interface, job *batch.Job, deleteIfFound bool) error { +func RunReplaceableJob(clientset kubernetes.Interface, job *batch.Job, deleteIfFound bool) error { // check if the job was already created and what its status is existingJob, err := clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{}) if err != nil && !errors.IsNotFound(err) { - // error but found, will recreate it - logger.Warningf("failed to detect job %s. %+v", job.Name, err) + // do nothing } else if err == nil { // if the job is still running, and the caller has not asked for deletion, // allow it to continue to completion @@ -32,7 +30,7 @@ func RunReplaceableJob(ctx context.Context, clientset kubernetes.Interface, job // delete the job that already exists from a previous run logger.Infof("Removing previous job %s to start a new one", job.Name) - err := DeleteBatchJob(ctx, clientset, job.Namespace, existingJob.Name, true) + err := DeleteBatchJob(clientset, job.Namespace, existingJob.Name, true) if err != nil { return fmt.Errorf("failed to remove job %s. %+v", job.Name, err) } @@ -44,7 +42,7 @@ func RunReplaceableJob(ctx context.Context, clientset kubernetes.Interface, job } // DeleteBatchJob deletes a Kubernetes job. 
-func DeleteBatchJob(ctx context.Context, clientset kubernetes.Interface, namespace, name string, wait bool) error { +func DeleteBatchJob(clientset kubernetes.Interface, namespace, name string, wait bool) error { propagation := metav1.DeletePropagationForeground gracePeriod := int64(0) options := &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod, PropagationPolicy: &propagation} @@ -52,7 +50,7 @@ func DeleteBatchJob(ctx context.Context, clientset kubernetes.Interface, namespa if errors.IsNotFound(err) { return nil } - return fmt.Errorf("failed to remove previous provisioning job for node %s. %+v", name, err) + return err } if !wait { @@ -80,8 +78,7 @@ func DeleteBatchJob(ctx context.Context, clientset kubernetes.Interface, namespa // WaitForJobCompletion waits for a job to reach the completed state. // Assumes that only one pod needs to complete. -func WaitForJobCompletion(ctx context.Context, clientset kubernetes.Interface, job *batch.Job, timeout time.Duration) error { - logger.Infof("waiting for job %s to complete...", job.Name) +func WaitForJobCompletion(clientset kubernetes.Interface, job *batch.Job, timeout time.Duration) error { return wait.Poll(5*time.Second, timeout, func() (bool, error) { job, err := clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{}) if err != nil { @@ -90,11 +87,10 @@ func WaitForJobCompletion(ctx context.Context, clientset kubernetes.Interface, j // if the job is still running, allow it to continue to completion if job.Status.Active > 0 { - logger.Debugf("job is still running. 
Status=%+v", job.Status) return false, nil } if job.Status.Failed > 0 { - return false, fmt.Errorf("job %s failed", job.Name) + return false, fmt.Errorf("job %s run failed", job.Name) } if job.Status.Succeeded > 0 { return true, nil diff --git a/pkg/k8sutil/name.go b/pkg/k8sutil/name.go deleted file mode 100644 index d2883a63..00000000 --- a/pkg/k8sutil/name.go +++ /dev/null @@ -1,57 +0,0 @@ -package k8sutil - -import "fmt" - -const ( - maxPerChar = 26 -) - -// IndexToName converts an index to a daemon name based on as few letters of the alphabet as possible. -// For example: -// -// 0 -> a -// 1 -> b -// 25 -> z -// 26 -> aa -func IndexToName(index int) string { - var result string - for { - i := index % maxPerChar - c := 'z' - maxPerChar + i + 1 - result = fmt.Sprintf("%c%s", c, result) - if index < maxPerChar { - break - } - // subtract 1 since the character conversion is zero-based - index = (index / maxPerChar) - 1 - } - return result -} - -// NameToIndex converts a daemon name to an index, which is the inverse of IndexToName -// For example: -// -// a -> 0 -// b -> 1 -func NameToIndex(name string) (int, error) { - factor := 1 - for i := 1; i < len(name); i++ { - factor *= maxPerChar - } - var result int - for _, c := range name { - charVal := int(maxPerChar - ('z' - c)) - if charVal < 1 || charVal > maxPerChar { - return -1, fmt.Errorf("invalid char '%c' (%d) in %s", c, charVal, name) - } - if factor == 1 { - // The least significant letter needs to be 0-based so we subtract 1 - result += charVal - 1 - } else { - result += charVal * factor - } - factor /= maxPerChar - } - - return result, nil -} diff --git a/pkg/k8sutil/node.go b/pkg/k8sutil/node.go index 0c7ca4c0..2d08a3f5 100644 --- a/pkg/k8sutil/node.go +++ b/pkg/k8sutil/node.go @@ -12,72 +12,38 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - 
"github.com/opencurve/curve-operator/pkg/clusterd" ) var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "k8sutil") -type NodesToDeploy struct { - NodeName string - NodeIP string -} - -// GetNodeInfoMap get node ip by node name that user specified and return a mapping of nodeName:nodeIP -func GetNodeInfoMap(nodes []string, clientset kubernetes.Interface) ([]NodesToDeploy, error) { - nodeNameIP := []NodesToDeploy{} - for _, nodeName := range nodes { - n, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to find node %s from cluster", nodeName) - } - - for _, address := range n.Status.Addresses { - if address.Type == "InternalIP" { - nodeNameIP = append(nodeNameIP, NodesToDeploy{ - NodeName: nodeName, - NodeIP: address.Address, - }) - } - } +func GetNodeIpByName(nodeName string, clientset kubernetes.Interface) (string, error) { + n, err := clientset.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + if err != nil { + return "", errors.Wrapf(err, "failed to find %s's ip by node name", nodeName) } - if len(nodeNameIP) == 1 { - for i := 0; i < 2; i++ { - nodeNameIP = append(nodeNameIP, nodeNameIP[0]) + var addr string + for _, address := range n.Status.Addresses { + if address.Type == "InternalIP" { + addr = address.Address + return addr, nil } } - logger.Infof("using %v to deploy cluster", nodeNameIP) - - return nodeNameIP, nil -} - -// GetNodeHostNames returns the name of the node resource mapped to their hostname label. -// Typically these will be the same name, but sometimes they are not such as when nodes have a longer -// dns name, but the hostname is short. 
-func GetNodeHostNames(clientset kubernetes.Interface) (map[string]string, error) { - nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{}) - if err != nil { - return nil, err + if len(addr) == 0 { + return "", errors.Errorf("failed to get host ip of %s by node name", nodeName) } - nodeMap := map[string]string{} - for _, node := range nodes.Items { - nodeMap[node.Name] = node.Labels[v1.LabelHostname] - } - return nodeMap, nil + return "", nil } // GetValidNodes returns all nodes that are ready and is schedulable -func GetValidNodes(c clusterd.Context, storageNodes []string) ([]v1.Node, error) { - nodes := []v1.Node{} - for _, curveNode := range storageNodes { - n, err := c.Clientset.CoreV1().Nodes().Get(curveNode, metav1.GetOptions{}) +func GetValidNodes(clientset kubernetes.Interface, nodes []string) ([]v1.Node, error) { + validNodes := []v1.Node{} + for _, node := range nodes { + n, err := clientset.CoreV1().Nodes().Get(node, metav1.GetOptions{}) if err != nil { - logger.Errorf("failed to get node %v info", curveNode) - return nil, errors.Wrap(err, "failed to get node info by node name") + return nil, err } // not scheduled @@ -88,57 +54,12 @@ func GetValidNodes(c clusterd.Context, storageNodes []string) ([]v1.Node, error) // ready status for _, c := range n.Status.Conditions { if c.Type == v1.NodeReady && c.Status == v1.ConditionTrue { - nodes = append(nodes, *n) + validNodes = append(validNodes, *n) } } } - return nodes, nil -} - -func GetValidDaemonHosts(c clusterd.Context, cluster *curvev1.CurveCluster) ([]v1.Node, error) { - daemonHosts := cluster.Spec.Nodes - validDaemonHosts, err := GetValidNodes(c, daemonHosts) - return validDaemonHosts, err -} - -func GetValidFSDaemonHosts(c clusterd.Context, cluster *curvev1.Curvefs) ([]v1.Node, error) { - daemonHosts := cluster.Spec.Nodes - validDaemonHosts, err := GetValidNodes(c, daemonHosts) - return validDaemonHosts, err -} - -func GetValidChunkserverHosts(c clusterd.Context, curveCluster 
*curvev1.CurveCluster) ([]v1.Node, error) { - if !curveCluster.Spec.Storage.UseSelectedNodes { - chunkserverHosts := curveCluster.Spec.Storage.Nodes - validNodes, err := GetValidNodes(c, chunkserverHosts) - return validNodes, err - } - // useSelectedNodes == true - var chunkserverHosts []string - - for _, s := range curveCluster.Spec.Storage.SelectedNodes { - chunkserverHosts = append(chunkserverHosts, s.Node) - } - valiedChunkHosts, err := GetValidNodes(c, chunkserverHosts) - - return valiedChunkHosts, err -} - -func MergeNodesOfDaemonAndChunk(daemonHosts []v1.Node, chunkserverHosts []v1.Node) []v1.Node { - var nodes []v1.Node - nodes = append(nodes, daemonHosts...) - nodes = append(nodes, chunkserverHosts...) - - var retNodes []v1.Node - tmpMap := make(map[string]struct{}, len(nodes)) - for _, n := range nodes { - if _, ok := tmpMap[n.Name]; !ok { - tmpMap[n.Name] = struct{}{} - retNodes = append(retNodes, n) - } - } - return retNodes + return validNodes, nil } // TruncateNodeNameForJob hashes the nodeName in case it would case the name to be longer than 63 characters @@ -164,7 +85,6 @@ func Hash(s string) string { func truncateNodeName(format, nodeName string, maxLength int) string { if len(nodeName)+len(fmt.Sprintf(format, "")) > maxLength { hashed := Hash(nodeName) - logger.Infof("format and nodeName longer than %d chars, nodeName %s will be %s", maxLength, nodeName, hashed) nodeName = hashed } return fmt.Sprintf(format, nodeName) diff --git a/pkg/k8sutil/patch/annotation.go b/pkg/k8sutil/patch/annotation.go deleted file mode 100644 index d0ea567b..00000000 --- a/pkg/k8sutil/patch/annotation.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright © 2019 Banzai Cloud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package patch - -import ( - "archive/zip" - "bytes" - "encoding/base64" - "io" - "net/http" - - json "github.com/json-iterator/go" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" -) - -const LastAppliedConfig = "banzaicloud.com/last-applied" - -var DefaultAnnotator = NewAnnotator(LastAppliedConfig) - -type Annotator struct { - metadataAccessor meta.MetadataAccessor - key string -} - -func NewAnnotator(key string) *Annotator { - return &Annotator{ - key: key, - metadataAccessor: meta.NewAccessor(), - } -} - -// GetOriginalConfiguration retrieves the original configuration of the object -// from the annotation, or nil if no annotation was found. -func (a *Annotator) GetOriginalConfiguration(obj runtime.Object) ([]byte, error) { - annots, err := a.metadataAccessor.Annotations(obj) - if err != nil { - return nil, err - } - - if annots == nil { - return nil, nil - } - - original, ok := annots[a.key] - if !ok { - return nil, nil - } - - // Try to base64 decode, and fallback to non-base64 encoded content for backwards compatibility. - if decoded, err := base64.StdEncoding.DecodeString(original); err == nil { - if http.DetectContentType(decoded) == "application/zip" { - return unZipAnnotation(decoded) - } - return decoded, nil - } - - return []byte(original), nil -} - -// SetOriginalConfiguration sets the original configuration of the object -// as the annotation on the object for later use in computing a three way patch. 
-func (a *Annotator) SetOriginalConfiguration(obj runtime.Object, original []byte) error { - if len(original) < 1 { - return nil - } - - annots, err := a.metadataAccessor.Annotations(obj) - if err != nil { - return err - } - - if annots == nil { - annots = map[string]string{} - } - - annots[a.key], err = zipAndBase64EncodeAnnotation(original) - if err != nil { - return err - } - return a.metadataAccessor.SetAnnotations(obj, annots) -} - -// GetModifiedConfiguration retrieves the modified configuration of the object. -// If annotate is true, it embeds the result as an annotation in the modified -// configuration. If an object was read from the command input, it will use that -// version of the object. Otherwise, it will use the version from the server. -func (a *Annotator) GetModifiedConfiguration(obj runtime.Object, annotate bool) ([]byte, error) { - // First serialize the object without the annotation to prevent recursion, - // then add that serialization to it as the annotation and serialize it again. - var modified []byte - - // Otherwise, use the server side version of the object. - // Get the current annotations from the object. 
- annots, err := a.metadataAccessor.Annotations(obj) - if err != nil { - return nil, err - } - - if annots == nil { - annots = map[string]string{} - } - - original := annots[a.key] - delete(annots, a.key) - if err := a.metadataAccessor.SetAnnotations(obj, annots); err != nil { - return nil, err - } - - // Do not include an empty annotation map - if len(annots) == 0 { - a.metadataAccessor.SetAnnotations(obj, nil) - } - - modified, err = json.ConfigCompatibleWithStandardLibrary.Marshal(obj) - if err != nil { - return nil, err - } - - if annotate { - annots[a.key], err = zipAndBase64EncodeAnnotation(modified) - if err != nil { - return nil, err - } - if err := a.metadataAccessor.SetAnnotations(obj, annots); err != nil { - return nil, err - } - - modified, err = json.ConfigCompatibleWithStandardLibrary.Marshal(obj) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[a.key] = original - if err := a.metadataAccessor.SetAnnotations(obj, annots); err != nil { - return nil, err - } - - return modified, nil -} - -// SetLastAppliedAnnotation gets the modified configuration of the object, -// without embedding it again, and then sets it on the object as the annotation. -func (a *Annotator) SetLastAppliedAnnotation(obj runtime.Object) error { - modified, err := a.GetModifiedConfiguration(obj, false) - if err != nil { - return err - } - // Remove nulls from json - modifiedWithoutNulls, _, err := DeleteNullInJson(modified) - if err != nil { - return err - } - return a.SetOriginalConfiguration(obj, modifiedWithoutNulls) -} - -func zipAndBase64EncodeAnnotation(original []byte) (string, error) { - // Create a buffer to write our archive to. - buf := new(bytes.Buffer) - - // Create a new zip archive. - w := zip.NewWriter(buf) - - f, err := w.Create("original") - if err != nil { - return "", err - } - _, err = f.Write(original) - if err != nil { - return "", err - } - - // Make sure to check the error on Close. 
- err = w.Close() - if err != nil { - return "", err - } - - return base64.StdEncoding.EncodeToString(buf.Bytes()), nil -} - -func unZipAnnotation(original []byte) ([]byte, error) { - annotation, err := io.ReadAll(bytes.NewReader(original)) - if err != nil { - return nil, err - } - - zipReader, err := zip.NewReader(bytes.NewReader(annotation), int64(len(annotation))) - if err != nil { - return nil, err - } - - // Read the file from zip archive - zipFile := zipReader.File[0] - unzippedFileBytes, err := readZipFile(zipFile) - if err != nil { - return nil, err - } - - return unzippedFileBytes, nil -} - -func readZipFile(zf *zip.File) ([]byte, error) { - f, err := zf.Open() - if err != nil { - return nil, err - } - defer f.Close() - return io.ReadAll(f) -} diff --git a/pkg/k8sutil/patch/deletenull.go b/pkg/k8sutil/patch/deletenull.go deleted file mode 100644 index d5784d83..00000000 --- a/pkg/k8sutil/patch/deletenull.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright © 2019 Banzai Cloud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package patch - -import ( - "reflect" - "unsafe" - - "emperror.dev/errors" - json "github.com/json-iterator/go" - "k8s.io/apimachinery/pkg/util/intstr" -) - -type CalculateOption func([]byte, []byte) ([]byte, []byte, error) - -func IgnoreStatusFields() CalculateOption { - return func(current, modified []byte) ([]byte, []byte, error) { - current, err := deleteStatusField(current) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete status field from current byte sequence") - } - - modified, err = deleteStatusField(modified) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete status field from modified byte sequence") - } - - return current, modified, nil - } -} - -func IgnoreField(field string) CalculateOption { - return func(current, modified []byte) ([]byte, []byte, error) { - current, err := deleteDataField(current, field) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete the field from current byte sequence") - } - - modified, err = deleteDataField(modified, field) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete the field from modified byte sequence") - } - - return current, modified, nil - } -} - -func IgnoreVolumeClaimTemplateTypeMetaAndStatus() CalculateOption { - return func(current, modified []byte) ([]byte, []byte, error) { - current, err := deleteVolumeClaimTemplateFields(current) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete status field from current byte sequence") - } - - modified, err = deleteVolumeClaimTemplateFields(modified) - if err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not delete status field from modified byte sequence") - } - - return current, modified, nil - } -} - -func init() { - // k8s.io/apimachinery/pkg/util/intstr.IntOrString behaves really badly - // from JSON marshaling point of view, it can't be empty basically. 
- // So we need to override the defined marshaling behaviour and write nil - // instead of 0, because usually (in all observed cases) 0 means "not set" - // for IntOrStr types. - // To make this happen we need to pull in json-iterator and override the - // factory marshaling overrides. - json.RegisterTypeEncoderFunc("intstr.IntOrString", - func(ptr unsafe.Pointer, stream *json.Stream) { - i := (*intstr.IntOrString)(ptr) - if i.IntValue() == 0 { - if i.StrVal != "" && i.StrVal != "0" { - stream.WriteString(i.StrVal) - } else { - stream.WriteNil() - } - } else { - stream.WriteInt(i.IntValue()) - } - }, - func(ptr unsafe.Pointer) bool { - i := (*intstr.IntOrString)(ptr) - return i.IntValue() == 0 && (i.StrVal == "" || i.StrVal == "0") - }, - ) -} - -func DeleteNullInJson(jsonBytes []byte) ([]byte, map[string]interface{}, error) { - var patchMap map[string]interface{} - - err := json.Unmarshal(jsonBytes, &patchMap) - if err != nil { - return nil, nil, errors.Wrap(err, "could not unmarshal json patch") - } - - filteredMap, err := deleteNullInObj(patchMap) - if err != nil { - return nil, nil, errors.Wrap(err, "could not delete null values from patch map") - } - - o, err := json.ConfigCompatibleWithStandardLibrary.Marshal(filteredMap) - if err != nil { - return nil, nil, errors.Wrap(err, "could not marshal filtered patch map") - } - - return o, filteredMap, err -} - -func deleteNullInObj(m map[string]interface{}) (map[string]interface{}, error) { - var err error - filteredMap := make(map[string]interface{}) - - for key, val := range m { - if val == nil || isZero(reflect.ValueOf(val)) { - continue - } - switch typedVal := val.(type) { - default: - return nil, errors.Errorf("unknown type: %v", reflect.TypeOf(typedVal)) - case []interface{}: - slice, err := deleteNullInSlice(typedVal) - if err != nil { - return nil, errors.Errorf("could not delete null values from subslice") - } - filteredMap[key] = slice - case string, float64, bool, int64, nil: - filteredMap[key] = val - 
case map[string]interface{}: - if len(typedVal) == 0 { - filteredMap[key] = typedVal - continue - } - - var filteredSubMap map[string]interface{} - filteredSubMap, err = deleteNullInObj(typedVal) - if err != nil { - return nil, errors.Wrap(err, "could not delete null values from filtered sub map") - } - - if len(filteredSubMap) != 0 { - filteredMap[key] = filteredSubMap - } - } - } - return filteredMap, nil -} - -func deleteNullInSlice(m []interface{}) ([]interface{}, error) { - filteredSlice := make([]interface{}, len(m)) - for key, val := range m { - if val == nil { - continue - } - switch typedVal := val.(type) { - default: - return nil, errors.Errorf("unknown type: %v", reflect.TypeOf(typedVal)) - case []interface{}: - filteredSubSlice, err := deleteNullInSlice(typedVal) - if err != nil { - return nil, errors.Errorf("could not delete null values from subslice") - } - filteredSlice[key] = filteredSubSlice - case string, float64, bool, int64, nil: - filteredSlice[key] = val - case map[string]interface{}: - filteredMap, err := deleteNullInObj(typedVal) - if err != nil { - return nil, errors.Wrap(err, "could not delete null values from filtered sub map") - } - filteredSlice[key] = filteredMap - } - } - return filteredSlice, nil -} - -func deleteDataField(obj []byte, fieldName string) ([]byte, error) { - var objectMap map[string]interface{} - err := json.Unmarshal(obj, &objectMap) - if err != nil { - return []byte{}, errors.Wrap(err, "could not unmarshal byte sequence") - } - delete(objectMap, fieldName) - obj, err = json.ConfigCompatibleWithStandardLibrary.Marshal(objectMap) - if err != nil { - return []byte{}, errors.Wrap(err, "could not marshal byte sequence") - } - - return obj, nil -} - -func deleteStatusField(obj []byte) ([]byte, error) { - var objectMap map[string]interface{} - err := json.Unmarshal(obj, &objectMap) - if err != nil { - return []byte{}, errors.Wrap(err, "could not unmarshal byte sequence") - } - delete(objectMap, "status") - obj, err = 
json.ConfigCompatibleWithStandardLibrary.Marshal(objectMap) - if err != nil { - return []byte{}, errors.Wrap(err, "could not marshal byte sequence") - } - - return obj, nil -} - -func deleteVolumeClaimTemplateFields(obj []byte) ([]byte, error) { - resource := map[string]interface{}{} - err := json.Unmarshal(obj, &resource) - if err != nil { - return []byte{}, errors.Wrap(err, "could not unmarshal byte sequence") - } - - if spec, ok := resource["spec"]; ok { - if spec, ok := spec.(map[string]interface{}); ok { - if vcts, ok := spec["volumeClaimTemplates"]; ok { - if vcts, ok := vcts.([]interface{}); ok { - for _, vct := range vcts { - if vct, ok := vct.(map[string]interface{}); ok { - vct["kind"] = "" - vct["apiVersion"] = "" - vct["status"] = map[string]string{ - "phase": "Pending", - } - } - } - } - } - } - } - - obj, err = json.ConfigCompatibleWithStandardLibrary.Marshal(resource) - if err != nil { - return []byte{}, errors.Wrap(err, "could not marshal byte sequence") - } - - return obj, nil -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - default: - z := reflect.Zero(v.Type()) - return v.Interface() == z.Interface() - case reflect.Float64, reflect.Int64, reflect.Bool: - return false - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } -} diff --git a/pkg/k8sutil/patch/ignorepdb.go b/pkg/k8sutil/patch/ignorepdb.go deleted file mode 100644 index ac433429..00000000 --- a/pkg/k8sutil/patch/ignorepdb.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright © 2021 Banzai Cloud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package patch - -import ( - "reflect" - "strings" - - "emperror.dev/errors" - json "github.com/json-iterator/go" -) - -func IgnorePDBSelector() CalculateOption { - return func(current, modified []byte) ([]byte, []byte, error) { - currentResource := map[string]interface{}{} - if err := json.Unmarshal(current, ¤tResource); err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not unmarshal byte sequence for current") - } - - modifiedResource := map[string]interface{}{} - if err := json.Unmarshal(modified, &modifiedResource); err != nil { - return []byte{}, []byte{}, errors.Wrap(err, "could not unmarshal byte sequence for modified") - } - - if isPDB(currentResource) && isPDB(modifiedResource) && reflect.DeepEqual(getPDBSelector(currentResource), getPDBSelector(modifiedResource)) { - var err error - current, err = deletePDBSelector(currentResource) - if err != nil { - return nil, nil, errors.Wrap(err, "delete pdb selector from current") - } - modified, err = deletePDBSelector(modifiedResource) - if err != nil { - return nil, nil, errors.Wrap(err, "delete pdb selector from modified") - } - } - - return current, modified, nil - } -} - -func isPDB(resource map[string]interface{}) bool { - if av, ok := resource["apiVersion"].(string); ok { - return strings.HasPrefix(av, "policy/") && resource["kind"] == "PodDisruptionBudget" - } - return false -} - -func getPDBSelector(resource map[string]interface{}) interface{} { - if spec, ok := resource["spec"]; ok { - if spec, ok := spec.(map[string]interface{}); ok { - if selector, ok := 
spec["selector"]; ok { - return selector - } - } - } - return nil -} - -func deletePDBSelector(resource map[string]interface{}) ([]byte, error) { - if spec, ok := resource["spec"]; ok { - if spec, ok := spec.(map[string]interface{}); ok { - delete(spec, "selector") - } - } - - obj, err := json.ConfigCompatibleWithStandardLibrary.Marshal(resource) - if err != nil { - return []byte{}, errors.Wrap(err, "could not marshal byte sequence") - } - - return obj, nil -} diff --git a/pkg/k8sutil/patch/patch.go b/pkg/k8sutil/patch/patch.go deleted file mode 100644 index e42e870c..00000000 --- a/pkg/k8sutil/patch/patch.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright © 2019 Banzai Cloud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package patch - -import ( - "fmt" - - "emperror.dev/errors" - json "github.com/json-iterator/go" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" -) - -var DefaultPatchMaker = NewPatchMaker(DefaultAnnotator, &K8sStrategicMergePatcher{}, &BaseJSONMergePatcher{}) - -type Maker interface { - Calculate(currentObject, modifiedObject runtime.Object, opts ...CalculateOption) (*PatchResult, error) -} - -type PatchMaker struct { - annotator *Annotator - - strategicMergePatcher StrategicMergePatcher - jsonMergePatcher JSONMergePatcher -} - -func NewPatchMaker(annotator *Annotator, strategicMergePatcher StrategicMergePatcher, jsonMergePatcher JSONMergePatcher) Maker { - return &PatchMaker{ - annotator: annotator, - - strategicMergePatcher: strategicMergePatcher, - jsonMergePatcher: jsonMergePatcher, - } -} - -func (p *PatchMaker) Calculate(currentObject, modifiedObject runtime.Object, opts ...CalculateOption) (*PatchResult, error) { - current, err := json.ConfigCompatibleWithStandardLibrary.Marshal(currentObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to convert current object to byte sequence") - } - - modified, err := json.ConfigCompatibleWithStandardLibrary.Marshal(modifiedObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to convert current object to byte sequence") - } - - for _, opt := range opts { - current, modified, err = opt(current, modified) - if err != nil { - return nil, errors.Wrap(err, "Failed to apply option function") - } - } - - current, _, err = DeleteNullInJson(current) - if err != nil { - return nil, errors.Wrap(err, "Failed to delete null from current object") - } - - modified, _, err = DeleteNullInJson(modified) - if err != nil { - return nil, errors.Wrap(err, "Failed to delete null from modified object") - } - - original, err := p.annotator.GetOriginalConfiguration(currentObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to get original configuration") - } - - 
var patch []byte - - switch currentObject.(type) { - default: - patch, err = p.strategicMergePatcher.CreateThreeWayMergePatch(original, modified, current, currentObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to generate strategic merge patch") - } - // $setElementOrder can make it hard to decide whether there is an actual diff or not. - // In cases like that trying to apply the patch locally on current will make it clear. - if string(patch) != "{}" { - patchCurrent, err := p.strategicMergePatcher.StrategicMergePatch(current, patch, currentObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to apply patch again to check for an actual diff") - } - patch, err = p.strategicMergePatcher.CreateTwoWayMergePatch(current, patchCurrent, currentObject) - if err != nil { - return nil, errors.Wrap(err, "Failed to create patch again to check for an actual diff") - } - } - case *unstructured.Unstructured: - patch, err = p.unstructuredJsonMergePatch(original, modified, current) - if err != nil { - return nil, errors.Wrap(err, "Failed to generate merge patch") - } - } - - return &PatchResult{ - Patch: patch, - Current: current, - Modified: modified, - Original: original, - }, nil -} - -func (p *PatchMaker) unstructuredJsonMergePatch(original, modified, current []byte) ([]byte, error) { - patch, err := p.jsonMergePatcher.CreateThreeWayJSONMergePatch(original, modified, current) - if err != nil { - return nil, errors.Wrap(err, "Failed to generate merge patch") - } - // Apply the patch to the current object and create a merge patch to see if there has any effective changes been made - if string(patch) != "{}" { - // apply the patch - patchedCurrent, err := p.jsonMergePatcher.MergePatch(current, patch) - if err != nil { - return nil, errors.Wrap(err, "Failed to merge generated patch to current object") - } - // create the patch again, but now between the current and the patched version of the current object - patch, err = 
p.jsonMergePatcher.CreateMergePatch(current, patchedCurrent) - if err != nil { - return nil, errors.Wrap(err, "Failed to create patch between the current and patched current object") - } - } - return patch, err -} - -type PatchResult struct { - Patch []byte - Current []byte - Modified []byte - Original []byte -} - -func (p *PatchResult) IsEmpty() bool { - return string(p.Patch) == "{}" -} - -func (p *PatchResult) String() string { - return fmt.Sprintf("\nPatch: %s \nCurrent: %s\nModified: %s\nOriginal: %s\n", p.Patch, p.Current, p.Modified, p.Original) -} diff --git a/pkg/k8sutil/patch/patch_makers.go b/pkg/k8sutil/patch/patch_makers.go deleted file mode 100644 index fcabee61..00000000 --- a/pkg/k8sutil/patch/patch_makers.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright © 2022 Banzai Cloud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package patch - -import ( - "emperror.dev/errors" - jsonpatch "github.com/evanphx/json-patch" - "k8s.io/apimachinery/pkg/util/jsonmergepatch" - "k8s.io/apimachinery/pkg/util/mergepatch" - "k8s.io/apimachinery/pkg/util/strategicpatch" -) - -type StrategicMergePatcher interface { - StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) - CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) - CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}) ([]byte, error) -} - -type JSONMergePatcher interface { - MergePatch(docData, patchData []byte) ([]byte, error) - CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) - CreateThreeWayJSONMergePatch(original, modified, current []byte) ([]byte, error) -} - -type K8sStrategicMergePatcher struct { - PreconditionFuncs []mergepatch.PreconditionFunc -} - -func (p *K8sStrategicMergePatcher) StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { - return strategicpatch.StrategicMergePatch(original, patch, dataStruct) -} - -func (p *K8sStrategicMergePatcher) CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) { - return strategicpatch.CreateTwoWayMergePatch(original, modified, dataStruct, p.PreconditionFuncs...) -} - -func (p *K8sStrategicMergePatcher) CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}) ([]byte, error) { - lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, errors.WrapWithDetails(err, "Failed to lookup patch meta", "current object", dataStruct) - } - - return strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, true, p.PreconditionFuncs...) 
-} - -type BaseJSONMergePatcher struct{} - -func (p *BaseJSONMergePatcher) MergePatch(docData, patchData []byte) ([]byte, error) { - return jsonpatch.MergePatch(docData, patchData) -} - -func (p *BaseJSONMergePatcher) CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) -} - -func (p *BaseJSONMergePatcher) CreateThreeWayJSONMergePatch(original, modified, current []byte) ([]byte, error) { - return jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current) -} diff --git a/pkg/k8sutil/pod.go b/pkg/k8sutil/pod.go index e33bf506..a3664a6a 100644 --- a/pkg/k8sutil/pod.go +++ b/pkg/k8sutil/pod.go @@ -12,7 +12,7 @@ func GetPodsByLabelSelector(clientset kubernetes.Interface, namespace string, se LabelSelector: selector, }) if err != nil { - return &v1.PodList{}, errors.Wrap(err, "failed to list pods by LabelSelector") + return &v1.PodList{}, errors.Wrapf(err, "failed to list pods by LabelSelector %s", selector) } return pods, nil } diff --git a/pkg/k8sutil/status.go b/pkg/k8sutil/status.go new file mode 100644 index 00000000..9308d35c --- /dev/null +++ b/pkg/k8sutil/status.go @@ -0,0 +1,6 @@ +package k8sutil + +// TODO: +func retryUpdateStatus() { + +} diff --git a/pkg/logrotate/logrotate.go b/pkg/logrotate/logrotate.go deleted file mode 100644 index 13538c84..00000000 --- a/pkg/logrotate/logrotate.go +++ /dev/null @@ -1,65 +0,0 @@ -package logrotate - -import ( - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/daemon" -) - -func CreateLogrotateConfigMap(c *daemon.Cluster) error { - - logrotateConfMapData := `/logs/* { - rotate 5 - missingok - compress - copytruncate - dateext - createolddir - olddir /logs/old - size 10m - notifempty - }` - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "log-conf", - Namespace: c.Namespace, - 
}, - Data: map[string]string{ - "logrotate.conf": logrotateConfMapData, - }, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return err - } - - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create %s configmap in namespace %s", cm.Name, c.Namespace) - } - - return nil -} - -func MakeLogrotateContainer() v1.Container { - container := v1.Container{ - Name: "logrotate", - Image: "linkyard/logrotate:1.0.0", - VolumeMounts: []v1.VolumeMount{ - { - Name: "log-volume", - MountPath: "/logs", - }, - { - Name: "log-conf", - MountPath: "/etc/logrotate.conf", - SubPath: "logrotate.conf", - }, - }, - } - return container -} diff --git a/pkg/mds/config.go b/pkg/mds/config.go deleted file mode 100644 index 507cb969..00000000 --- a/pkg/mds/config.go +++ /dev/null @@ -1,51 +0,0 @@ -package mds - -import "github.com/opencurve/curve-operator/pkg/config" - -// mdsConfig implements config.ConfigInterface -var _ config.ConfigInterface = &mdsConfig{} - -// mdsConfig for a single mds -type mdsConfig struct { - Prefix string - ServiceAddr string - ServicePort string - ServiceDummyPort string - ClusterEtcdAddr string - ClusterSnapshotcloneProxyAddr string - - ResourceName string - CurrentConfigMapName string - DaemonID string - DataPathMap *config.DataPathMap - ConfigMapMountPath string -} - -func (c *mdsConfig) GetPrefix() string { return c.Prefix } -func (c *mdsConfig) GetServiceId() string { return "" } -func (c *mdsConfig) GetServiceRole() string { return "mds" } -func (c *mdsConfig) GetServiceHost() string { return "" } -func (c *mdsConfig) GetServiceHostSequence() string { return "" } -func (c *mdsConfig) GetServiceReplicaSequence() string { return "" } -func (c *mdsConfig) GetServiceReplicasSequence() string { return "" } -func (c *mdsConfig) GetServiceAddr() string { return c.ServiceAddr } -func (c *mdsConfig) GetServicePort() string { return 
c.ServicePort } -func (c *mdsConfig) GetServiceClientPort() string { return "" } -func (c *mdsConfig) GetServiceDummyPort() string { return c.ServiceDummyPort } -func (c *mdsConfig) GetServiceProxyPort() string { return "" } -func (c *mdsConfig) GetServiceExternalAddr() string { return "" } -func (c *mdsConfig) GetServiceExternalPort() string { return "" } -func (c *mdsConfig) GetLogDir() string { return "" } -func (c *mdsConfig) GetDataDir() string { return "" } - -func (c *mdsConfig) GetClusterEtcdHttpAddr() string { return "" } -func (c *mdsConfig) GetClusterEtcdAddr() string { return c.ClusterEtcdAddr } -func (c *mdsConfig) GetClusterMdsAddr() string { return "" } -func (c *mdsConfig) GetClusterMdsDummyAddr() string { return "" } -func (c *mdsConfig) GetClusterMdsDummyPort() string { return "" } -func (c *mdsConfig) GetClusterChunkserverAddr() string { return "" } -func (c *mdsConfig) GetClusterMetaserverAddr() string { return "" } -func (c *mdsConfig) GetClusterSnapshotcloneAddr() string { return "" } -func (c *mdsConfig) GetClusterSnapshotcloneProxyAddr() string { return c.ClusterSnapshotcloneProxyAddr } -func (c *mdsConfig) GetClusterSnapshotcloneDummyPort() string { return "" } -func (c *mdsConfig) GetClusterSnapshotcloneNginxUpstream() string { return "" } diff --git a/pkg/mds/mds.go b/pkg/mds/mds.go deleted file mode 100644 index 6b2fec5c..00000000 --- a/pkg/mds/mds.go +++ /dev/null @@ -1,160 +0,0 @@ -package mds - -import ( - "context" - "fmt" - "path" - "strconv" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - AppName = 
"curve-mds" - ConfigMapNamePrefix = "curve-mds-conf" - - // Prefix is the mount path of data and log - Prefix = "/curvebs/mds" - ContainerDataDir = "/curvebs/mds/data" - ContainerLogDir = "/curvebs/mds/logs" - - FSPrefix = "/curvefs/mds" - FSContainerDataDir = "/curvefs/mds/data" - FSContainerLogDir = "/curvefs/mds/logs" -) - -type Cluster struct { - *daemon.Cluster -} - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "mds") - -// Start Curve mds daemon -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo, dcs []*topology.DeployConfig) ([]*topology.DeployConfig, error) { - overrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.EtcdOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - clusterEtcdAddr := overrideCM.Data[config.ClusterEtcdAddr] - - var mdsEndpoints, clusterMdsDummyAddr, clusterMdsDummyPort string - for _, node := range nodesInfo { - mdsEndpoints = fmt.Sprint(mdsEndpoints, node.NodeIP, ":", node.MdsPort, ",") - clusterMdsDummyAddr = fmt.Sprint(clusterMdsDummyAddr, node.NodeIP, ":", node.DummyPort, ",") - clusterMdsDummyPort = fmt.Sprint(clusterMdsDummyPort, node.DummyPort, ",") - } - mdsEndpoints = strings.TrimRight(mdsEndpoints, ",") - clusterMdsDummyAddr = strings.TrimRight(clusterMdsDummyAddr, ",") - clusterMdsDummyPort = strings.TrimRight(clusterMdsDummyPort, ",") - - // create mds override configmap to record mds endpoints - err = c.createOverrideMdsCM(mdsEndpoints, clusterMdsDummyAddr, clusterMdsDummyPort) - if err != nil { - return nil, err - } - - var configMapMountPath, prefix, containerDataDir, containerLogDir string - if c.Kind == config.KIND_CURVEBS { - prefix = Prefix - containerDataDir = ContainerDataDir - containerLogDir = ContainerLogDir - configMapMountPath = config.MdsConfigMapMountPathDir - } else { - prefix = FSPrefix - containerDataDir = 
FSContainerDataDir - containerLogDir = FSContainerLogDir - configMapMountPath = config.FSMdsConfigMapMountPathDir - } - - var deploymentsToWaitFor []*appsv1.Deployment - - var daemonIDString string - for _, node := range nodesInfo { - daemonIDString = k8sutil.IndexToName(node.HostID) - resourceName := fmt.Sprintf("%s-%s", AppName, daemonIDString) - currentConfigMapName := fmt.Sprintf("%s-%s", ConfigMapNamePrefix, daemonIDString) - - mdsConfig := &mdsConfig{ - Prefix: prefix, - ServiceAddr: node.NodeIP, - ServicePort: strconv.Itoa(node.MdsPort), - ServiceDummyPort: strconv.Itoa(node.DummyPort), - ClusterEtcdAddr: clusterEtcdAddr, - ClusterSnapshotcloneProxyAddr: "", - - DaemonID: daemonIDString, - ResourceName: resourceName, - CurrentConfigMapName: currentConfigMapName, - DataPathMap: config.NewDaemonDataPathMap( - path.Join(c.DataDirHostPath, fmt.Sprint("mds-", daemonIDString)), - path.Join(c.LogDirHostPath, fmt.Sprint("mds-", daemonIDString)), - containerDataDir, - containerLogDir, - ), - ConfigMapMountPath: configMapMountPath, - } - dc := &topology.DeployConfig{ - Kind: c.Kind, - Role: config.ROLE_MDS, - NodeName: node.NodeName, - NodeIP: node.NodeIP, - Port: node.MdsPort, - ReplicasSequence: node.ReplicasSequence, - Replicas: len(c.Nodes), - StandAlone: node.StandAlone, - } - dcs = append(dcs, dc) - - err := c.CreateEachConfigMap(config.MdsConfigMapDataKey, mdsConfig, currentConfigMapName) - if err != nil { - return nil, err - } - - d, err := c.makeDeployment(node.NodeName, node.NodeIP, mdsConfig) - if err != nil { - return nil, err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return nil, errors.Wrapf(err, "failed to create mds deployment %s", resourceName) - } - logger.Infof("deployment for mds %s already exists. 
updating if needed", resourceName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return nil, err - } - } - - k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeMdsReady, curvev1.ConditionTrue, curvev1.ConditionMdsClusterCreatedReason, "MDS cluster has been created") - - return dcs, nil -} diff --git a/pkg/mds/spec.go b/pkg/mds/spec.go deleted file mode 100644 index 188372df..00000000 --- a/pkg/mds/spec.go +++ /dev/null @@ -1,159 +0,0 @@ -package mds - -import ( - "fmt" - "path" - "strconv" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/logrotate" -) - -// createOverrideMdsCM create mds-endpoints-override configmap to record mds endpoints -func (c *Cluster) createOverrideMdsCM(mdsEndpoints, clusterMdsDummyAddr, clusterMdsDummyPort string) error { - - mdsConfigMapData := map[string]string{ - config.MdsOvverideConfigMapDataKey: mdsEndpoints, - config.ClusterMdsDummyAddr: clusterMdsDummyAddr, - config.ClusterMdsDummyPort: clusterMdsDummyPort, - } - - // create mds override configMap to record the endpoints of etcd - mdsOverrideCM := &v1.ConfigMap{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: config.MdsOverrideConfigMapName, - Namespace: c.NamespacedName.Namespace, - }, - Data: mdsConfigMapData, - } - - err := c.OwnerInfo.SetControllerReference(mdsOverrideCM) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to mds override configmap %q", config.MdsOverrideConfigMapName) - } - - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(mdsOverrideCM) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create override configmap %s", c.NamespacedName.Namespace) - } - logger.Infof("ConfigMap for override mds endpoints %s already exists. updating if needed", config.MdsOverrideConfigMapName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.Context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("ConfigMap %s for override mds endpoints has been created", config.MdsOverrideConfigMapName) - } - - return nil -} - -// makeDeployment make mds deployment to run mds daemon -func (c *Cluster) makeDeployment(nodeName string, nodeIP string, mdsConfig *mdsConfig) (*apps.Deployment, error) { - volumes := daemon.DaemonVolumes(config.MdsConfigMapDataKey, mdsConfig.ConfigMapMountPath, mdsConfig.DataPathMap, mdsConfig.CurrentConfigMapName) - labels := daemon.CephDaemonAppLabels(AppName, c.Namespace, "mds", mdsConfig.DaemonID, c.Kind) - - // add log config volume - logConfCMVolSource := &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "log-conf"}} - volumes = append(volumes, v1.Volume{Name: "log-conf", VolumeSource: v1.VolumeSource{ConfigMap: logConfCMVolSource}}) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: mdsConfig.ResourceName, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ 
- c.makeMdsDaemonContainer(nodeIP, mdsConfig), - logrotate.MakeLogrotateContainer(), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: mdsConfig.ResourceName, - Namespace: c.NamespacedName.Namespace, - Labels: labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", d.Name) - } - - return d, nil -} - -// makeMdsDaemonContainer create mds container -func (c *Cluster) makeMdsDaemonContainer(nodeIP string, mdsConfig *mdsConfig) v1.Container { - port, _ := strconv.Atoi(mdsConfig.ServicePort) - dummyPort, _ := strconv.Atoi(mdsConfig.ServiceDummyPort) - var commandLine string - if c.Kind == config.KIND_CURVEBS { - commandLine = "/curvebs/mds/sbin/curvebs-mds" - } else { - commandLine = "/curvefs/mds/sbin/curvefs-mds" - } - - configFileMountPath := path.Join(mdsConfig.ConfigMapMountPath, config.MdsConfigMapDataKey) - argsConfigFileDir := fmt.Sprintf("--confPath=%s", configFileMountPath) - - container := v1.Container{ - Name: "mds", - Command: []string{ - commandLine, - }, - Args: []string{ - argsConfigFileDir, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: daemon.DaemonVolumeMounts(config.MdsConfigMapDataKey, mdsConfig.ConfigMapMountPath, mdsConfig.DataPathMap, mdsConfig.CurrentConfigMapName), - Ports: []v1.ContainerPort{ - { - Name: "listen-port", - ContainerPort: int32(port), - HostPort: int32(port), - Protocol: v1.ProtocolTCP, - }, - { - Name: 
"dummy-port", - ContainerPort: int32(dummyPort), - HostPort: int32(dummyPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - - return container -} diff --git a/pkg/metaserver/config.go b/pkg/metaserver/config.go deleted file mode 100644 index 422e7c83..00000000 --- a/pkg/metaserver/config.go +++ /dev/null @@ -1,55 +0,0 @@ -package metaserver - -import "github.com/opencurve/curve-operator/pkg/config" - -// mdsConfig implements config.ConfigInterface -var _ config.ConfigInterface = &metaserverConfig{} - -type metaserverConfig struct { - Prefix string - ServiceAddr string - ServicePort string - ServiceExternalAddr string - ServiceExternalPort string - ClusterEtcdAddr string - ClusterMdsAddr string - ClusterMdsDummyAddr string - ClusterMetaserverAddr string - - ResourceName string - CurrentConfigMapName string - DaemonID string - NodeName string - NodeIP string - DataPathMap *config.DataPathMap -} - -func (c *metaserverConfig) GetPrefix() string { return c.Prefix } -func (c *metaserverConfig) GetServiceId() string { return "" } -func (c *metaserverConfig) GetServiceRole() string { return "metaserver" } -func (c *metaserverConfig) GetServiceHost() string { return "" } -func (c *metaserverConfig) GetServiceHostSequence() string { return "" } -func (c *metaserverConfig) GetServiceReplicaSequence() string { return "" } -func (c *metaserverConfig) GetServiceReplicasSequence() string { return "" } -func (c *metaserverConfig) GetServiceAddr() string { return c.ServiceAddr } -func (c *metaserverConfig) GetServicePort() string { return c.ServicePort } -func (c *metaserverConfig) GetServiceClientPort() string { return "" } -func (c *metaserverConfig) GetServiceDummyPort() string { return "" } -func (c *metaserverConfig) GetServiceProxyPort() string { return "" } -func (c *metaserverConfig) GetServiceExternalAddr() string { return c.ServiceExternalAddr } -func (c *metaserverConfig) GetServiceExternalPort() string { return 
c.ServiceExternalPort } -func (c *metaserverConfig) GetLogDir() string { return "" } -func (c *metaserverConfig) GetDataDir() string { return "" } - -// cluster -func (c *metaserverConfig) GetClusterEtcdHttpAddr() string { return "" } -func (c *metaserverConfig) GetClusterEtcdAddr() string { return c.ClusterEtcdAddr } -func (c *metaserverConfig) GetClusterMdsAddr() string { return c.ClusterMdsAddr } -func (c *metaserverConfig) GetClusterMdsDummyAddr() string { return c.ClusterMdsDummyAddr } -func (c *metaserverConfig) GetClusterMdsDummyPort() string { return "" } -func (c *metaserverConfig) GetClusterChunkserverAddr() string { return "" } -func (c *metaserverConfig) GetClusterMetaserverAddr() string { return c.ClusterMetaserverAddr } -func (c *metaserverConfig) GetClusterSnapshotcloneAddr() string { return "" } -func (c *metaserverConfig) GetClusterSnapshotcloneProxyAddr() string { return "" } -func (c *metaserverConfig) GetClusterSnapshotcloneDummyPort() string { return "" } -func (c *metaserverConfig) GetClusterSnapshotcloneNginxUpstream() string { return "" } diff --git a/pkg/metaserver/metaserver.go b/pkg/metaserver/metaserver.go deleted file mode 100644 index 1e1363ce..00000000 --- a/pkg/metaserver/metaserver.go +++ /dev/null @@ -1,177 +0,0 @@ -package metaserver - -import ( - "context" - "fmt" - "path" - "strconv" - "strings" - - "github.com/coreos/pkg/capnslog" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - curvev1 "github.com/opencurve/curve-operator/api/v1" -) - -const ( - AppName = "curve-metaserver" - ConfigMapNamePrefix = "curve-metaserver" - - FSPrefix = "/curvefs/metaserver" - FSContainerDataDir = "/curvefs/metaserver/data" - 
FSContainerLogDir = "/curvefs/metaserver/logs" -) - -type Cluster struct { - *daemon.Cluster -} - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "metaserver") - -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo, globalDCs []*topology.DeployConfig) ([]*topology.DeployConfig, error) { - msConfigs, dcs, globalDCs, err := c.buildConfigs(nodesInfo, globalDCs) - if err != nil { - return nil, err - } - - // create tool ConfigMap - if err := c.CreateEachConfigMap(config.ToolsConfigMapDataKey, msConfigs[0], config.ToolsConfigMapName); err != nil { - return nil, err - } - - // create topology ConfigMap - if err := topology.CreateTopoConfigMap(c.Cluster, dcs); err != nil { - return nil, err - } - - // create logic pool - _, err = topology.RunCreatePoolJob(c.Cluster, dcs, topology.LOGICAL_POOL) - if err != nil { - return nil, err - } - - var deploymentsToWaitFor []*appsv1.Deployment - for _, msConfig := range msConfigs { - if err := c.CreateEachConfigMap(config.MetaServerConfigMapDataKey, msConfig, msConfig.CurrentConfigMapName); err != nil { - return nil, err - } - d, err := c.makeDeployment(msConfig, msConfig.NodeName, msConfig.NodeIP) - if err != nil { - return nil, err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return nil, errors.Wrapf(err, "failed to create mds deployment %s", msConfig.ResourceName) - } - logger.Infof("deployment for mds %s already exists. updating if needed", msConfig.ResourceName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.Context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. 
%v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return nil, err - } - } - - k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeMetaServerReady, curvev1.ConditionTrue, curvev1.ConditionMetaServerClusterCreatedReason, "MetaServer cluster has been created") - return globalDCs, nil -} - -// Start Curve metaserver daemon -func (c *Cluster) buildConfigs(nodesInfo []daemon.NodeInfo, globalDCs []*topology.DeployConfig) ([]*metaserverConfig, []*topology.DeployConfig, []*topology.DeployConfig, error) { - logger.Infof("starting to run metaserver in namespace %q", c.NamespacedName.Namespace) - - // get ClusterEtcdAddr - etcdOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.EtcdOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to get etcd override endoints configmap") - } - clusterEtcdAddr := etcdOverrideCM.Data[config.ClusterEtcdAddr] - - // get ClusterMdsAddr - mdsOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.MdsOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to get mds override endoints configmap") - } - clusterMdsAddr := mdsOverrideCM.Data[config.MdsOvverideConfigMapDataKey] - clusterMdsDummyAddr := mdsOverrideCM.Data[config.ClusterMdsDummyAddr] - - // get clusterMetaserverAddr - metaserveraddr := []string{} - for _, node := range nodesInfo { - metaserveraddr = append(metaserveraddr, fmt.Sprint(node.NodeIP, ":", strconv.Itoa(node.MetaserverPort))) - } - 
clusterMetaserverAddr := strings.Join(metaserveraddr, ",") - logger.Info("clusterMetaserverAddr is ", clusterMetaserverAddr) - - metaserverConfigs := []*metaserverConfig{} - dcs := []*topology.DeployConfig{} - for _, node := range nodesInfo { - daemonIDString := k8sutil.IndexToName(node.HostID) - resourceName := fmt.Sprintf("%s-%s", AppName, daemonIDString) - currentConfigMapName := fmt.Sprintf("%s-%s", ConfigMapNamePrefix, daemonIDString) - - metaserverConfig := &metaserverConfig{ - Prefix: FSPrefix, - ServiceAddr: node.NodeIP, - ServicePort: strconv.Itoa(node.MetaserverPort), - ServiceExternalAddr: node.NodeIP, - ServiceExternalPort: strconv.Itoa(node.MetaserverExternalPort), - ClusterEtcdAddr: clusterEtcdAddr, - ClusterMdsAddr: clusterMdsAddr, - ClusterMdsDummyAddr: clusterMdsDummyAddr, - ClusterMetaserverAddr: clusterMetaserverAddr, - - DaemonID: daemonIDString, - ResourceName: resourceName, - CurrentConfigMapName: currentConfigMapName, - DataPathMap: config.NewDaemonDataPathMap( - path.Join(c.DataDirHostPath, fmt.Sprint("metaserver-", daemonIDString)), - path.Join(c.LogDirHostPath, fmt.Sprint("metaserver-", daemonIDString)), - FSContainerDataDir, - FSContainerLogDir, - ), - NodeName: node.NodeName, - NodeIP: node.NodeIP, - } - - dc := &topology.DeployConfig{ - Kind: c.Kind, - Role: config.ROLE_METASERVER, - Copysets: c.Metaserver.CopySets, - NodeName: node.NodeName, - NodeIP: node.NodeIP, - Port: node.MetaserverPort, - ReplicasSequence: node.ReplicasSequence, - Replicas: len(c.Nodes), - StandAlone: node.StandAlone, - } - metaserverConfigs = append(metaserverConfigs, metaserverConfig) - dcs = append(dcs, dc) - globalDCs = append(globalDCs, dc) - } - - return metaserverConfigs, dcs, globalDCs, nil -} diff --git a/pkg/metaserver/spec.go b/pkg/metaserver/spec.go deleted file mode 100644 index 75c6a4da..00000000 --- a/pkg/metaserver/spec.go +++ /dev/null @@ -1,115 +0,0 @@ -package metaserver - -import ( - "fmt" - "path" - "strconv" - - 
"github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/logrotate" - "github.com/opencurve/curve-operator/pkg/topology" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// makeDeployment make metaserver deployment to run mds daemon -func (c *Cluster) makeDeployment(metaserverConfig *metaserverConfig, nodeName string, nodeIP string) (*apps.Deployment, error) { - volumes := daemon.DaemonVolumes(config.MetaServerConfigMapDataKey, config.MetaServerConfigMapMountPath, metaserverConfig.DataPathMap, metaserverConfig.CurrentConfigMapName) - vols, _ := topology.CreateTopoAndToolVolumeAndMount(c.Cluster) - volumes = append(volumes, vols...) - labels := daemon.CephDaemonAppLabels(AppName, c.Namespace, "metaserver", metaserverConfig.DaemonID, c.Kind) - - // add log config volume - logConfCMVolSource := &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "log-conf"}} - volumes = append(volumes, v1.Volume{Name: "log-conf", VolumeSource: v1.VolumeSource{ConfigMap: logConfCMVolSource}}) - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: metaserverConfig.ResourceName, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.makeMSDaemonContainer(nodeIP, metaserverConfig), - logrotate.MakeLogrotateContainer(), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: metaserverConfig.ResourceName, - Namespace: c.NamespacedName.Namespace, - Labels: labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - 
}, - }, - } - - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, err - } - - return d, nil -} - -// makeMdsDaemonContainer create mds container -func (c *Cluster) makeMSDaemonContainer(nodeIP string, metaserverConfig *metaserverConfig) v1.Container { - configFileMountPath := path.Join(config.MetaServerConfigMapMountPath, config.MetaServerConfigMapDataKey) - argsConfigFileDir := fmt.Sprintf("--confPath=%s", configFileMountPath) - - volMounts := daemon.DaemonVolumeMounts(config.MetaServerConfigMapDataKey, config.MetaServerConfigMapMountPath, metaserverConfig.DataPathMap, metaserverConfig.CurrentConfigMapName) - _, mounts := topology.CreateTopoAndToolVolumeAndMount(c.Cluster) - volMounts = append(volMounts, mounts...) - - port, _ := strconv.Atoi(metaserverConfig.ServicePort) - // externalPort, _ := strconv.Atoi(metaserverConfig.ServiceExternalPort) - - container := v1.Container{ - Name: "metaserver", - Command: []string{ - "/curvefs/metaserver/sbin/curvefs-metaserver", - }, - Args: []string{ - argsConfigFileDir, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: volMounts, - Ports: []v1.ContainerPort{ - { - Name: "listen-port", - ContainerPort: int32(port), - HostPort: int32(port), - Protocol: v1.ProtocolTCP, - }, - // { - // Name: "external-port", - // ContainerPort: int32(externalPort), - // HostPort: int32(externalPort), - // Protocol: v1.ProtocolTCP, - // }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - - return container -} diff --git a/pkg/monitor/monitor.go b/pkg/monitor/monitor.go deleted file mode 100644 index bddb7906..00000000 --- a/pkg/monitor/monitor.go +++ /dev/null @@ -1,185 +0,0 @@ -package monitor - -import ( - "context" - - "github.com/coreos/pkg/capnslog" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" - "github.com/pkg/errors" - apps 
"k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" -) - -const ( - PromAppName = "curve-prometheus" - GrafanaAppName = "curve-grafana" - NodeExporterAppName = "node-exporter" -) - -const ( - // container config path - PrometheusConfPath = "/etc/prometheus" - // container data path - PrometheusTSDBPath = "/prometheus" - // target json - TargetJSONDataKey = "target.json" - // container grafana data path - GrafanaContainerDataPath = "/var/lib/grafana" -) - -var ( - prometheusLabels = map[string]string{"app": "curve-prometheus"} - grafanaLables = map[string]string{"app": "curve-grafana"} - nodeExporterLabels = map[string]string{"app": "node-exporter"} -) - -type Cluster struct { - *daemon.Cluster -} - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "monitor") - -type serviceTarget struct { - Targets []string `json:"targets"` - Labels map[string]string `json:"labels"` -} - -// Start configure monitor for curve cluster including Prometheus, Grafana and Node-Exporter -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo, dcs []*topology.DeployConfig) error { - err := c.startPrometheus(nodesInfo, dcs) - if err != nil { - return err - } - - err = c.startGrafana() - if err != nil { - return err - } - - err = c.startNodeExporter(nodesInfo) - if err != nil { - return err - } - - return nil -} - -// startPrometheus create prometheus config and deployment then create it in cluster. 
-func (c *Cluster) startPrometheus(nodesInfo []daemon.NodeInfo, dcs []*topology.DeployConfig) error { - targetJson, err := parsePrometheusTarget(dcs) - if err != nil { - return err - } - - nodeIPs := filterNodeForExporter(nodesInfo) - - err = c.createPrometheusConfigMap(targetJson, nodeIPs) - if err != nil { - return err - } - - d, err := c.makePrometheusDeployment() - if err != nil { - return err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create prometheus deployment %s", d.GetName()) - } - logger.Infof("deployment for monitor %s already exists. updating if needed", d.GetName()) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - } - - // wait deployment to start - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return err - } - - logger.Info("Promethes deploy successed") - - return nil -} - -// startGrafana create Grafana deployment and create it in cluster. -func (c *Cluster) startGrafana() error { - err := c.createGrafanaConfigMap() - if err != nil { - return err - } - - d, err := c.makeGrafanaDeployment() - if err != nil { - return err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create grafana deployment %s", d.GetName()) - } - logger.Infof("deployment for monitor %s already exists. 
updating if needed", d.GetName()) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - } - - // wait deployment to start - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return err - } - - return nil -} - -// startNodeExporter create node-exporter deployment and create it in cluster. -func (c *Cluster) startNodeExporter(nodesInfo []daemon.NodeInfo) error { - nodeNames := filterNodeNameForExporter(nodesInfo) - var deploymentsToWaitFor []*apps.Deployment - for _, nodeName := range nodeNames { - d, err := c.makeNodeExporterDeployment(nodeName) - if err != nil { - return err - } - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create node exporter deployment %s", d.GetName()) - } - logger.Infof("deployment for monitor %s already exists. updating if needed", d.GetName()) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. 
%v", resourceName, err) - // } - } else { - logger.Infof("Deployment %s has been created , waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return err - } - } - return nil -} diff --git a/pkg/monitor/script.go b/pkg/monitor/script.go deleted file mode 100644 index dc92b894..00000000 --- a/pkg/monitor/script.go +++ /dev/null @@ -1,29 +0,0 @@ -package monitor - -var PROMETHEUS_YML = ` -global: - scrape_interval: 3s - evaluation_interval: 15s -scrape_configs: - - job_name: 'prometheus' - static_configs: - - targets: ['localhost:%d'] - - job_name: 'curve_metrics' - file_sd_configs: - - files: ['target.json'] - - job_name: 'node' - static_configs: - - targets: %s -` - -var GRAFANA_DATA_SOURCE = ` -datasources: -- name: 'Prometheus' - type: 'prometheus' - access: 'proxy' - org_id: 1 - url: 'http://%s:%d' - is_default: true - version: 1 - editable: true -` diff --git a/pkg/monitor/spec.go b/pkg/monitor/spec.go deleted file mode 100644 index 2c345cf7..00000000 --- a/pkg/monitor/spec.go +++ /dev/null @@ -1,356 +0,0 @@ -package monitor - -import ( - "fmt" - "path" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// createPrometheusConfigMap create prometheus.yml configmap and mount to prometheus container -func (c *Cluster) createPrometheusConfigMap(targetJson string, nodeIPs []string) error { - configMapData := make(map[string]string) - nodeExporterEndpoints := c.getExporterEndpoints(nodeIPs) - prometheusYamlContent := fmt.Sprintf(PROMETHEUS_YML, c.Monitor.Prometheus.ListenPort, 
nodeExporterEndpoints) - configMapData[config.PrometheusConfigMapDataKey] = prometheusYamlContent - configMapData[TargetJSONDataKey] = targetJson - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.PrometheusConfigMapName, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to configmap %q", config.PrometheusConfigMapName) - } - - // create configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create configmap %s", config.PrometheusConfigMapName) - } - return nil -} - -// makePrometheusDeployment make prometheus deployment -func (c *Cluster) makePrometheusDeployment() (*apps.Deployment, error) { - dataPath := &config.DataPathMap{ - HostDataDir: c.Monitor.Prometheus.DataDir, - ContainerDataDir: PrometheusTSDBPath, - } - volumes := daemon.DaemonVolumes("", PrometheusConfPath, dataPath, config.PrometheusConfigMapName) - - runAsUser := int64(0) - runAsNonRoot := false - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: PromAppName, - Labels: prometheusLabels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.createPrometheusContainer(dataPath), - }, - NodeName: c.Monitor.MonitorHost, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - }, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: PromAppName, - Namespace: c.NamespacedName.Namespace, - Labels: prometheusLabels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: prometheusLabels, - }, - Template: podSpec, - Replicas: &replicas, - 
Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", d.Name) - } - - return d, nil -} - -// createPrometheusContainer create prometheus container -func (c *Cluster) createPrometheusContainer(dataPath *config.DataPathMap) v1.Container { - // construct start parameters - argsMap := map[string]string{ - "config.file": path.Join(PrometheusConfPath, config.PrometheusConfigMapDataKey), - "storage.tsdb.path": PrometheusTSDBPath, - "storage.tsdb.retention.time": c.Monitor.Prometheus.RetentionTime, - "storage.tsdb.retention.size": c.Monitor.Prometheus.RetentionSize, - "web.listen-address": fmt.Sprint(":", c.Monitor.Prometheus.ListenPort), - } - args := []string{} - for k, v := range argsMap { - var item string - if v != "" { - item = fmt.Sprintf("--%s=%v", k, v) - } else { - item = fmt.Sprintf("--%s", k) - } - args = append(args, item) - } - - container := v1.Container{ - Name: PromAppName, - Image: c.Monitor.Prometheus.ContainerImage, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - Args: args, - VolumeMounts: daemon.DaemonVolumeMounts("", PrometheusConfPath, dataPath, config.PrometheusConfigMapName), - Ports: []v1.ContainerPort{ - { - Name: "prometheus-port", - ContainerPort: int32(c.Monitor.Prometheus.ListenPort), - HostPort: int32(c.Monitor.Prometheus.ListenPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - return container -} - -// createGrafanaConfigMap create grafana datasource configmap all.yml -func (c *Cluster) createGrafanaConfigMap() error { - configMapData := make(map[string]string) - content := fmt.Sprintf(GRAFANA_DATA_SOURCE, "127.0.0.1", c.Monitor.Prometheus.ListenPort) - configMapData[config.GrafanaDataSourcesConfigMapDataKey] = content - - cm := &v1.ConfigMap{ - ObjectMeta: 
metav1.ObjectMeta{ - Name: config.GrafanaDataSourcesConfigMapName, - Namespace: c.Namespace, - }, - Data: configMapData, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to configmap %q", config.PrometheusConfigMapName) - } - - // create configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed to create configmap %s", config.PrometheusConfigMapName) - } - - return nil -} - -// makeGrafanaDeployment make grafana deployment -func (c *Cluster) makeGrafanaDeployment() (*apps.Deployment, error) { - dataPath := &config.DataPathMap{ - HostDataDir: c.Monitor.Grafana.DataDir, - ContainerDataDir: GrafanaContainerDataPath, - } - volumes := daemon.DaemonVolumes(config.GrafanaDataSourcesConfigMapDataKey, config.GrafanaDataSourcesConfigMapMountPath, dataPath, config.GrafanaDataSourcesConfigMapName) - vols := daemon.DaemonVolumes("", config.GrafanaDashboardsMountPath, nil, config.GrafanaDashboardsTemp) - volumes = append(volumes, vols...) - vols = daemon.DaemonVolumes(config.GrafanaINIConfigMapDataKey, config.GrafanaINIConfigMountPath, nil, config.GrafanaDashboardsTemp) - volumes = append(volumes, vols...) 
- - runAsUser := int64(0) - runAsNonRoot := false - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: GrafanaAppName, - Labels: grafanaLables, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.createGrafaContainer(dataPath), - }, - NodeName: c.Monitor.MonitorHost, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - }, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: GrafanaAppName, - Namespace: c.NamespacedName.Namespace, - Labels: grafanaLables, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: grafanaLables, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", d.Name) - } - - return d, nil -} - -// createGrafaContainer create grafana container -func (c *Cluster) createGrafaContainer(dataPath *config.DataPathMap) v1.Container { - volMounts := daemon.DaemonVolumeMounts(config.GrafanaDataSourcesConfigMapDataKey, config.GrafanaDataSourcesConfigMapMountPath, dataPath, config.GrafanaDataSourcesConfigMapName) - volM := daemon.DaemonVolumeMounts("", config.GrafanaDashboardsMountPath, nil, config.GrafanaDashboardsTemp) - volMounts = append(volMounts, volM...) - volM = daemon.DaemonVolumeMounts(config.GrafanaINIConfigMapDataKey, config.GrafanaINIConfigMountPath, nil, config.GrafanaDashboardsTemp) - volMounts = append(volMounts, volM...) 
- - container := v1.Container{ - Name: GrafanaAppName, - Image: c.Monitor.Grafana.ContainerImage, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: volMounts, - Ports: []v1.ContainerPort{ - { - Name: "grafana-port", - ContainerPort: int32(c.Monitor.Grafana.ListenPort), - HostPort: int32(c.Monitor.Grafana.ListenPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{ - {Name: "TZ", Value: "Asia/Hangzhou"}, - {Name: "GF_SECURITY_ADMIN_USER", Value: c.Monitor.Grafana.UserName}, - {Name: "GF_SECURITY_ADMIN_PASSWORD", Value: c.Monitor.Grafana.PassWord}, - }, - } - return container -} - -func (c *Cluster) makeNodeExporterDeployment(nodeName string) (*apps.Deployment, error) { - runAsUser := int64(0) - runAsNonRoot := false - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: NodeExporterAppName, - Labels: nodeExporterLabels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.createNodeExporterContainer(), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - }, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprint(NodeExporterAppName, "-", nodeName), - Namespace: c.NamespacedName.Namespace, - Labels: nodeExporterLabels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: nodeExporterLabels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", d.Name) - } - - return d, nil -} - -func (c *Cluster) createNodeExporterContainer() v1.Container { - // construct start parameters - argsMap := 
map[string]interface{}{ - "path.rootfs": "/host", - "collector.cpu.info": nil, - "web.listen-address": fmt.Sprintf(":%d", c.Monitor.NodeExporter.ListenPort), - } - args := []string{} - for k, v := range argsMap { - var item string - if v != nil { - item = fmt.Sprintf("--%s=%v", k, v) - } else { - item = fmt.Sprintf("--%s", k) - } - args = append(args, item) - } - - container := v1.Container{ - Name: NodeExporterAppName, - Image: c.Monitor.NodeExporter.ContainerImage, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - Args: args, - Ports: []v1.ContainerPort{ - { - Name: "exporter-port", - ContainerPort: int32(c.Monitor.NodeExporter.ListenPort), - HostPort: int32(c.Monitor.NodeExporter.ListenPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - return container -} diff --git a/pkg/monitor/util.go b/pkg/monitor/util.go deleted file mode 100644 index a3cacd4f..00000000 --- a/pkg/monitor/util.go +++ /dev/null @@ -1,86 +0,0 @@ -package monitor - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/topology" - "github.com/pkg/errors" -) - -// filterNodeForExporter distinct nodes and return nodes ip -func filterNodeForExporter(nodesInfo []daemon.NodeInfo) []string { - var preNodeName string - var ret []string - for _, node := range nodesInfo { - if node.NodeName != preNodeName { - ret = append(ret, node.NodeIP) - } - preNodeName = node.NodeName - } - return ret -} - -// filterNodeNameForExporter distinct nodes and return nodes name -func filterNodeNameForExporter(nodesInfo []daemon.NodeInfo) []string { - var preNodeName string - var ret []string - for _, node := range nodesInfo { - if node.NodeName != preNodeName { - ret = append(ret, node.NodeName) - } - preNodeName = node.NodeName - } - return ret -} - -// getExporterEndpoints get nodes that to deploy node-exporter on it 
-func (c *Cluster) getExporterEndpoints(nodeIPs []string) string { - endpoint := []string{} - for _, item := range nodeIPs { - endpoint = append(endpoint, fmt.Sprintf("'%s:%d'", item, c.Monitor.NodeExporter.ListenPort)) - } - return fmt.Sprintf("[%s]", strings.Join(endpoint, ",")) -} - -// parsePrometheusTarget parse topology and create target.json string. -func parsePrometheusTarget(dcs []*topology.DeployConfig) (string, error) { - targets := []serviceTarget{} - tMap := make(map[string]serviceTarget) - for _, dc := range dcs { - role := dc.Role - ip := dc.NodeIP - var item string - switch role { - case config.ROLE_ETCD: - item = fmt.Sprintf("%s:%d", ip, dc.Port) - case config.ROLE_MDS, - config.ROLE_CHUNKSERVER, - config.ROLE_METASERVER: - item = fmt.Sprintf("%s:%d", ip, dc.Port) - case config.ROLE_SNAPSHOTCLONE: - item = fmt.Sprintf("%s:%d", ip, dc.Port) - } - if _, ok := tMap[role]; ok { - t := tMap[role] - t.Targets = append(t.Targets, item) - tMap[role] = t - } else { - tMap[role] = serviceTarget{ - Labels: map[string]string{"job": role}, - Targets: []string{item}, - } - } - } - for _, v := range tMap { - targets = append(targets, v) - } - target, err := json.Marshal(targets) - if err != nil { - return "", errors.New("failed to parse prometheus ") - } - return string(target), nil -} diff --git a/pkg/service/cleanup.go b/pkg/service/cleanup.go new file mode 100644 index 00000000..91b69b3b --- /dev/null +++ b/pkg/service/cleanup.go @@ -0,0 +1,91 @@ +package service + +import ( + "strings" + "time" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + clusterCleanUpPolicyRetryInterval = 5 * time.Second + + CURVE_DATA_DIR_HOST_PATH = "CURVE_DATA_DIR_HOST_PATH" + CURVE_LOG_DIR_HOST_PATH = "CURVE_LOG_DIR_HOST_PATH" +) + +var ( + 
CURVE_CLEAN_UP_APP_NAME = "curve-cleanup-%s" + CURVE_CLEAN_UP_POD_NAME = "curve-cleanup" +) + +func StartClusterCleanUpJob(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + labels := map[string]string{"app": CURVE_CLEAN_UP_APP_NAME} + securityContext := k8sutil.PrivilegedContext(true) + + commandLine := `rm -rf ${CURVE_DATA_DIR_HOST_PATH} && rm -rf ${CURVE_LOG_DIR_HOST_PATH} ` + + for _, dc := range dcs { + vols, volMounts := getServiceHostPathVolumeAndMount(dc) + jobName := k8sutil.TruncateNodeNameForJob(CURVE_CLEAN_UP_APP_NAME, dc.GetHost()) + container := v1.Container{ + Name: CURVE_CLEAN_UP_POD_NAME, + Image: cluster.GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + Command: []string{ + "/bin/bash", + "-c", + }, + Args: []string{ + commandLine, + }, + Env: []v1.EnvVar{ + {Name: CURVE_DATA_DIR_HOST_PATH, Value: strings.TrimRight(dc.GetDataDir(), "/")}, + {Name: CURVE_LOG_DIR_HOST_PATH, Value: strings.TrimRight(dc.GetLogDir(), "/")}, + }, + VolumeMounts: volMounts, + SecurityContext: securityContext, + } + podTempalteSpec := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Labels: labels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + container, + }, + Volumes: vols, + RestartPolicy: v1.RestartPolicyOnFailure, + NodeName: dc.GetHost(), + }, + } + + ttlTimeout := int32(0) + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: CURVE_CLEAN_UP_POD_NAME, + Namespace: cluster.GetNameSpace(), + Labels: labels, + }, + Spec: batchv1.JobSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: podTempalteSpec, + TTLSecondsAfterFinished: &ttlTimeout, // delete itself immediately after finished. 
+ }, + } + err := k8sutil.RunReplaceableJob(cluster.GetContext().Clientset, job, true) + if err != nil { + return err + } + } + + return nil +} diff --git a/pkg/service/pool.go b/pkg/service/pool.go new file mode 100644 index 00000000..3a96d9a4 --- /dev/null +++ b/pkg/service/pool.go @@ -0,0 +1,278 @@ +package service + +import ( + "encoding/json" + "fmt" + "sort" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" +) + +const ( + CURVE_TOPOLOGY_CONFIGMAP = "curve-cluster-pool" + TOPO_JSON_FILE_NAME = "topology.json" +) + +const ( + KIND_CURVEBS = topology.KIND_CURVEBS + KIND_CURVEFS = topology.KIND_CURVEFS + ROLE_CHUNKSERVER = topology.ROLE_CHUNKSERVER + ROLE_METASERVER = topology.ROLE_METASERVER + + DEFAULT_REPLICAS_PER_COPYSET = 3 + DEFAULT_ZONES_PER_POOL = 3 + DEFAULT_TYPE = 0 + DEFAULT_SCATTER_WIDTH = 0 +) + +/* + * curvebs_cluster_topo: + * servers: + * - name: server1 + * internalip: 127.0.0.1 + * internalport: 16701 + * externalip: 127.0.0.1 + * externalport: 16701 + * zone: zone1 + * physicalpool: pool1 + * ... + * logicalpools: + * - name: pool1 + * physicalpool: pool1 + * replicasnum: 3 + * copysetnum: 100 + * zonenum: 3 + * type: 0 + * scatterwidth: 0 + * ... + * + * + * curvefs_cluster_topo: + * servers: + * - name: server1 + * internalip: 127.0.0.1 + * internalport: 16701 + * externalip: 127.0.0.1 + * externalport: 16701 + * zone: zone1 + * pool: pool1 + * ... 
+ * pools: + * - name: pool1 + * replicasnum: 3 + * copysetnum: 100 + * zonenum: 3 + */ + +type ( + LogicalPool struct { + Name string `json:"name"` + Replicas int `json:"replicasnum"` + Zones int `json:"zonenum"` + Copysets int `json:"copysetnum"` + Type int `json:"type"` // curvebs + ScatterWidth int `json:"scatterwidth"` // curvebs + PhysicalPool string `json:"physicalpool"` // curvebs + } + + Server struct { + Name string `json:"name"` + InternalIp string `json:"internalip"` + InternalPort int `json:"internalport"` + ExternalIp string `json:"externalip"` + ExternalPort int `json:"externalport"` + Zone string `json:"zone"` + PhysicalPool string `json:"physicalpool,omitempty"` // curvebs + Pool string `json:"pool,omitempty"` // curvefs + } + + CurveClusterTopo struct { + Servers []Server `json:"servers"` + LogicalPools []LogicalPool `json:"logicalpools,omitempty"` // curvebs + Pools []LogicalPool `json:"pools,omitempty"` // curvefs + NPools int `json:"npools"` + } +) + +// prepare get cluster pool or create new cluster pool +func createOrUpdatePoolConfigMap(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) error { + clusterPool, err := getClusterPool(cluster, dcs) + if err != nil { + return err + } + + var bytes []byte + bytes, err = json.Marshal(clusterPool) + if err != nil { + return err + } + clusterPoolJson := string(bytes) + data := map[string]string{ + TOPO_JSON_FILE_NAME: clusterPoolJson, + } + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: CURVE_TOPOLOGY_CONFIGMAP, + Namespace: cluster.GetNameSpace(), + }, + Data: data, + } + + _, err = k8sutil.CreateOrUpdateConfigMap(cluster.GetContext().Clientset, cm) + if err != nil { + return nil + } + + return nil +} + +func getClusterPool(cluster clusterd.Clusterer, dcs []*topology.DeployConfig) (CurveClusterTopo, error) { + cm, err := k8sutil.GetConfigMapByName(cluster.GetContext().Clientset, cluster.GetNameSpace(), CURVE_TOPOLOGY_CONFIGMAP) + if err != nil { + if 
apierrors.IsNotFound(err) { + return generateDefaultClusterPool(dcs) + } + return CurveClusterTopo{}, err + } + + oldPool := CurveClusterTopo{} + oldPoolStrData := cm.Data[TOPO_JSON_FILE_NAME] + err = json.Unmarshal([]byte(oldPoolStrData), &oldPool) + pool, err := generateDefaultClusterPool(dcs) + if err != nil { + return pool, err + } + + // gurantee oldPool and pool has same servers + for i, server := range pool.Servers { + oldPool.Servers[i].InternalIp = server.InternalIp + oldPool.Servers[i].InternalPort = server.InternalPort + oldPool.Servers[i].ExternalIp = server.ExternalIp + oldPool.Servers[i].ExternalPort = server.ExternalPort + } + if dcs[0].GetKind() == topology.KIND_CURVEBS { + for i, pool := range pool.LogicalPools { + oldPool.LogicalPools[i].Copysets = pool.Copysets + } + } + + return oldPool, nil +} + +func generateDefaultClusterPool(dcs []*topology.DeployConfig) (topo CurveClusterTopo, err error) { + topo = generateClusterPool(dcs, "pool1") + return +} + +func generateClusterPool(dcs []*topology.DeployConfig, poolName string) CurveClusterTopo { + lpool, servers := createLogicalPool(dcs, poolName) + topo := CurveClusterTopo{Servers: servers, NPools: 1} + if dcs[0].GetKind() == KIND_CURVEBS { + topo.LogicalPools = []LogicalPool{lpool} + } else { + topo.Pools = []LogicalPool{lpool} + } + return topo +} + +func createLogicalPool(dcs []*topology.DeployConfig, logicalPool string) (LogicalPool, []Server) { + var zone string + copysets := 0 + servers := []Server{} + zones := DEFAULT_ZONES_PER_POOL + nextZone := genNextZone(zones) + physicalPool := logicalPool + kind := dcs[0].GetKind() + SortDeployConfigs(dcs) + for _, dc := range dcs { + role := dc.GetRole() + if (role == ROLE_CHUNKSERVER && kind == KIND_CURVEBS) || + (role == ROLE_METASERVER && kind == KIND_CURVEFS) { + if dc.GetParentId() == dc.GetId() { + zone = nextZone() + } + + // NOTE: if we deploy chunkservers with instance feature + // and the value of instance greater than 1, we should + // set 
internal port and external port to 0 for let MDS + // attribute them as services on the same machine. + // see issue: https://github.com/opencurve/curve/issues/1441 + internalPort := dc.GetListenPort() + externalPort := dc.GetListenExternalPort() + if dc.GetInstances() > 1 { + internalPort = 0 + externalPort = 0 + } + + server := Server{ + Name: formatName(dc), + InternalIp: dc.GetHostIp(), + InternalPort: internalPort, + ExternalIp: dc.GetListenExternalIp(), + ExternalPort: externalPort, + Zone: zone, + } + if kind == KIND_CURVEBS { + server.PhysicalPool = physicalPool + } else { + server.Pool = logicalPool + } + copysets += dc.GetCopysets() + servers = append(servers, server) + } + } + + // copysets + copysets = (int)(copysets / DEFAULT_REPLICAS_PER_COPYSET) + if copysets == 0 { + copysets = 1 + } + + // logical pool + lpool := LogicalPool{ + Name: logicalPool, + Copysets: copysets, + Zones: zones, + Replicas: DEFAULT_REPLICAS_PER_COPYSET, + } + if kind == KIND_CURVEBS { + lpool.ScatterWidth = DEFAULT_SCATTER_WIDTH + lpool.Type = DEFAULT_TYPE + lpool.PhysicalPool = physicalPool + } + + return lpool, servers +} + +func genNextZone(zones int) func() string { + idx := 0 + return func() string { + idx++ + return fmt.Sprintf("zone%d", (idx-1)%zones+1) + } +} + +// we should sort the "dcs" for generate correct zone number +func SortDeployConfigs(dcs []*topology.DeployConfig) { + sort.Slice(dcs, func(i, j int) bool { + dc1, dc2 := dcs[i], dcs[j] + if dc1.GetRole() == dc2.GetRole() { + if dc1.GetHostSequence() == dc2.GetHostSequence() { + return dc1.GetInstancesSequence() < dc2.GetInstancesSequence() + } + return dc1.GetHostSequence() < dc2.GetHostSequence() + } + return dc1.GetRole() < dc2.GetRole() + }) +} + +func formatName(dc *topology.DeployConfig) string { + return fmt.Sprintf("%s_%s_%d", dc.GetHost(), dc.GetName(), dc.GetInstancesSequence()) +} diff --git a/pkg/service/register.go b/pkg/service/register.go new file mode 100644 index 00000000..1376524d --- 
/dev/null +++ b/pkg/service/register.go @@ -0,0 +1,177 @@ +package service + +import ( + "fmt" + "path" + "strconv" + "strings" + + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" +) + +const ( + POOL_TYPE_PHYSICAL = "physicalpool" + POOL_TYPE_LOGICAL = "logicalpool" + + WAIT_MDS_ELECTION_CONTAINER = "wait-mds-election-container" + WAIT_CHUNKSERVER_START_CONTAINER = "wait-chunkserver-start-container" +) + +var ( + CURVE_CREATE_POOL_JOB = "curve-create-%s" +) + +// StartJobCreatePool create job to create physicalpool or logicalpool +func StartJobCreatePool(cluster clusterd.Clusterer, dc *topology.DeployConfig, dcs []*topology.DeployConfig, poolType string) error { + // create or update CURVE_TOPOLOGY_CONFIGMAP configmap that store cluster pool json + err := createOrUpdatePoolConfigMap(cluster, dcs) + if err != nil { + return err + } + + // security context + privileged := true + runAsUser := int64(0) + runAsNonRoot := false + readOnlyRootFilesystem := false + + vols, volMounts := getToolsAndTopoVolumeAndMount(dc) + poolJsonPath := path.Join(dc.GetProjectLayout().ToolsConfDir, TOPO_JSON_FILE_NAME) + container := v1.Container{ + Name: fmt.Sprintf(CURVE_CREATE_POOL_JOB, poolType), + Command: []string{ + genCreatePoolCommand(dc, poolType, poolJsonPath), + }, + Image: cluster.GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + VolumeMounts: volMounts, + SecurityContext: &v1.SecurityContext{ + Privileged: &privileged, + RunAsUser: &runAsUser, + RunAsNonRoot: &runAsNonRoot, + ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + }, + } + + initContianers, err := makeCreatePoolJobInitContainers(cluster, dcs, poolType) + if err != nil { + return err + } + + podSpec := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
fmt.Sprintf(CURVE_CREATE_POOL_JOB, poolType), + Labels: getCreatePoolJobLabel(poolType), + }, + Spec: v1.PodSpec{ + InitContainers: initContianers, + Containers: []v1.Container{ + container, + }, + RestartPolicy: v1.RestartPolicyOnFailure, + HostNetwork: true, + DNSPolicy: v1.DNSClusterFirstWithHostNet, + Volumes: vols, + }, + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(CURVE_CREATE_POOL_JOB, poolType), + Namespace: cluster.GetNameSpace(), + Labels: getCreatePoolJobLabel(poolType), + }, + Spec: batchv1.JobSpec{ + Template: podSpec, + }, + } + + err = cluster.GetOwnerInfo().SetControllerReference(job) + if err != nil { + return err + } + + err = k8sutil.RunReplaceableJob(cluster.GetContext().Clientset, job, true) + return err +} + +// getCreatePoolJobLabel return curve-create-pool Pod and Deployment label +func getCreatePoolJobLabel(poolType string) map[string]string { + labels := map[string]string{} + labels["app"] = "create-pool" + labels["type"] = poolType + return labels +} + +// genCreatePoolCommand generate create pool command by cluster kind and poolType parameter +func genCreatePoolCommand(dc *topology.DeployConfig, pooltype, poolJSONPath string) string { + layout := dc.GetProjectLayout() + toolsBinaryPath := layout.ToolsBinaryPath + if dc.GetKind() == topology.KIND_CURVEFS { + // for curvefs, the default topology json path is current directory's topology.json + return fmt.Sprintf("%s create-topology", toolsBinaryPath) + } + + return fmt.Sprintf("%s -op=create_%s -cluster_map=%s", + toolsBinaryPath, pooltype, poolJSONPath) +} + +// makeCreatePoolJobInitContainers create two init container to precheck work +// 1. wait mds leader election success(bs and fs) +// 2. 
wait chunkservers online before create logical pool(bs) +func makeCreatePoolJobInitContainers(cluster clusterd.Clusterer, dcs []*topology.DeployConfig, poolType string) ([]v1.Container, error) { + containers := []v1.Container{} + clusterMdsAddr, err := dcs[0].GetVariables().Get("cluster_mds_addr") + if err != nil { + return nil, err + } + clusterMdsAddr = strings.Replace(clusterMdsAddr, ",", " ", -1) + + wait_mds_election_container := v1.Container{ + Name: WAIT_MDS_ELECTION_CONTAINER, + Command: []string{ + "bash", + "-c", + wait_mds_election, + }, + Image: cluster.GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + Env: []v1.EnvVar{ + { + Name: "CLUSTER_MDS_ADDR", + Value: clusterMdsAddr, + }, + }, + } + + containers = append(containers, wait_mds_election_container) + + if dcs[0].GetKind() == topology.KIND_CURVEBS && poolType == POOL_TYPE_LOGICAL { + nChunkserver := len(topology.FilterDeployConfigByRole(dcs, topology.ROLE_CHUNKSERVER)) + wait_chunkserver_start_container := v1.Container{ + Name: WAIT_CHUNKSERVER_START_CONTAINER, + Command: []string{ + "bash", + "-c", + wait_chunkserver_start, + }, + Image: cluster.GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + Env: []v1.EnvVar{ + { + Name: "CHUNKSERVER_NUMS", + Value: strconv.Itoa(nChunkserver), + }, + }, + } + containers = append(containers, wait_chunkserver_start_container) + } + + return containers, nil +} diff --git a/pkg/service/scripts.go b/pkg/service/scripts.go new file mode 100644 index 00000000..049f1053 --- /dev/null +++ b/pkg/service/scripts.go @@ -0,0 +1,43 @@ +package service + +var wait_mds_election string = ` +#!/usr/bin/env bash + +[[ -z $(which curl) ]] && apt-get install -y curl +wait=0 +while ((wait<20)) +do + for addr in "$CLUSTER_MDS_ADDR" + do + curl --connect-timeout 3 --max-time 10 $addr -Iso /dev/null + if [ $? 
== 0 ]; then + exit 0 + fi + done + sleep 0.5s + wait=$(expr $wait + 1) +done + +exit 1 +` + +var wait_chunkserver_start string = ` +#!/usr/bin/env bash + +g_total=${CHUNKSERVER_NUMS} +total=$(expr $g_total + 0) + +wait=0 +while ((wait<60)) +do + online=$(curve_ops_tool chunkserver-status | sed -nr 's/.*online = ([0-9]+).*/\1/p') + if [[ $online -eq $total ]]; then + exit 0 + fi + + sleep 0.5s + wait=$((wait+1)) +done + +exit 1 +` diff --git a/pkg/service/service.go b/pkg/service/service.go new file mode 100644 index 00000000..5cce6ff6 --- /dev/null +++ b/pkg/service/service.go @@ -0,0 +1,237 @@ +package service + +import ( + "fmt" + "strings" + + "github.com/coreos/pkg/capnslog" + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + "github.com/opencurve/curve-operator/pkg/topology" +) + +var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "service") + +// createService create specified service according to specified dc object +// for example etcd, mds +func StartService(cluster clusterd.Clusterer, dc *topology.DeployConfig) error { + return makeServiceDeployment(cluster, dc) +} + +// createServiceDeployment create service Deployment and wait it to start according to specified dc object +func makeServiceDeployment(cluster clusterd.Clusterer, dc *topology.DeployConfig) error { + layout := dc.GetProjectLayout() + vols, volMounts := getServiceHostPathVolumeAndMount(dc) + + // resolve configmap volume and volumeMount + for _, conf := range layout.ServiceConfFiles { + vm, vms := getServiceConfigMapVolumeAndMount(fmt.Sprintf("%s_%s", dc.GetName(), conf.Name), + layout.ServiceConfDir) + vols = append(vols, vm) + volMounts = append(volMounts, vms) + } + + container := v1.Container{ + Name: getResourceName(dc), + Command: []string{ + fmt.Sprintf("--role %s --args='%s'", dc.GetRole(), 
getArguments(dc)), + }, + Image: cluster.GetContainerImage(), + ImagePullPolicy: v1.PullIfNotPresent, + VolumeMounts: volMounts, + Ports: getContainerPorts(dc), + Env: []v1.EnvVar{ + {Name: "TZ", Value: "Asia/Hangzhou"}, + {Name: "'LD_PRELOAD=%s'", Value: "/usr/local/lib/libjemalloc.so"}, + }, + } + + podSpec := v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: getResourceName(dc), + Labels: getServiceLabel(dc), + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + // c.makeChmodDirInitContainer(etcdConfig), + }, + Containers: []v1.Container{ + // c.makeEtcdDaemonContainer(nodeName, ip, etcdConfig, etcdConfig.ClusterEtcdHttpAddr), + // logrotate.MakeLogrotateContainer(), + container, + }, + NodeName: dc.GetHost(), + RestartPolicy: getRestartPolicy(dc), + HostNetwork: true, + DNSPolicy: v1.DNSClusterFirstWithHostNet, + Volumes: vols, + }, + } + + replicas := int32(1) + d := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: getResourceName(dc), + Namespace: cluster.GetNameSpace(), + Labels: getServiceLabel(dc), + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: getServiceLabel(dc), + }, + Template: podSpec, + Replicas: &replicas, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + }, + } + + // set ownerReference + err := cluster.GetOwnerInfo().SetControllerReference(d) + if err != nil { + return err + } + + _, err = k8sutil.CreateOrUpdateDeploymentAndWaitStart(cluster.GetContext().Clientset, d) + if err != nil { + return err + } + + logger.Infof("Create %s service Deployment in namespace %s successed", dc.GetName(), cluster.GetNameSpace()) + + return nil +} + +// getResourceName get the name of k8s curve resource +func getResourceName(dc *topology.DeployConfig) string { + return fmt.Sprintf("%s-%s", "curve", dc.GetName()) +} + +// getServiceLabel get labels of specified service +func getServiceLabel(dc *topology.DeployConfig) map[string]string { + labels := 
map[string]string{} + labels["role"] = dc.GetRole() + labels["name"] = dc.GetName() + return labels +} + +// getArguments get service command arguments +func getArguments(dc *topology.DeployConfig) string { + role := dc.GetRole() + if role != topology.ROLE_CHUNKSERVER { + return "" + } + + // only chunkserver need so many arguments, but who cares + layout := dc.GetProjectLayout() + dataDir := layout.ServiceDataDir + chunkserverArguments := map[string]interface{}{ + // chunkserver + "conf": layout.ServiceConfPath, + "chunkServerIp": dc.GetHostIp(), + "enableExternalServer": dc.GetEnableExternalServer(), + "chunkServerExternalIp": dc.GetListenExternalIp(), + "chunkServerPort": dc.GetListenPort(), + "chunkFilePoolDir": dataDir, + "chunkFilePoolMetaPath": fmt.Sprintf("%s/chunkfilepool.meta", dataDir), + "walFilePoolDir": dataDir, + "walFilePoolMetaPath": fmt.Sprintf("%s/walfilepool.meta", dataDir), + "copySetUri": fmt.Sprintf("local://%s/copysets", dataDir), + "recycleUri": fmt.Sprintf("local://%s/recycler", dataDir), + "raftLogUri": fmt.Sprintf("curve://%s/copysets", dataDir), + "raftSnapshotUri": fmt.Sprintf("curve://%s/copysets", dataDir), + "chunkServerStoreUri": fmt.Sprintf("local://%s", dataDir), + "chunkServerMetaUri": fmt.Sprintf("local://%s/chunkserver.dat", dataDir), + // brpc + "bthread_concurrency": 18, + "graceful_quit_on_sigterm": true, + // raft + "raft_sync": true, + "raft_sync_meta": true, + "raft_sync_segments": true, + "raft_max_segment_size": 8388608, + "raft_max_install_snapshot_tasks_num": 1, + "raft_use_fsync_rather_than_fdatasync": false, + } + + arguments := []string{} + for k, v := range chunkserverArguments { + arguments = append(arguments, fmt.Sprintf("-%s=%v", k, v)) + } + return strings.Join(arguments, " ") +} + +// getRestartPolicy chunkserver and metaserver never restart and others always start +func getRestartPolicy(dc *topology.DeployConfig) v1.RestartPolicy { + switch dc.GetRole() { + case topology.ROLE_ETCD, + topology.ROLE_MDS, + 
topology.ROLE_SNAPSHOTCLONE: + return v1.RestartPolicyAlways + } + return v1.RestartPolicyNever +} + +// newContainerPort create a container port obj +func newContainerPort(name string, containerPort, hostPort int32) v1.ContainerPort { + return v1.ContainerPort{ + Name: name, + ContainerPort: containerPort, + HostPort: hostPort, + } +} + +// getContainerPorts get the service need to network port +func getContainerPorts(dc *topology.DeployConfig) []v1.ContainerPort { + ports := []v1.ContainerPort{} + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_PORT.Key(), + int32(dc.GetListenPort()), + int32(dc.GetListenPort()), + )) + + role := dc.GetRole() + switch role { + case topology.ROLE_ETCD: + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_CLIENT_PORT.Key(), + int32(dc.GetListenClientPort()), + int32(dc.GetListenClientPort()), + )) + case topology.ROLE_MDS: + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_DUMMY_PORT.Key(), + int32(dc.GetListenDummyPort()), + int32(dc.GetListenDummyPort()), + )) + case topology.ROLE_CHUNKSERVER: + if dc.GetEnableExternalServer() { + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_EXTERNAL_PORT.Key(), + int32(dc.GetListenExternalPort()), + int32(dc.GetListenExternalPort()), + )) + } + case topology.ROLE_SNAPSHOTCLONE: + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_DUMMY_PORT.Key(), + int32(dc.GetListenDummyPort()), + int32(dc.GetListenDummyPort()), + )) + case topology.ROLE_METASERVER: + if dc.GetEnableExternalServer() { + ports = append(ports, newContainerPort( + topology.CONFIG_LISTEN_EXTERNAL_PORT.Key(), + int32(dc.GetListenExternalPort()), + int32(dc.GetListenExternalPort()), + )) + } + } + + return ports +} diff --git a/pkg/service/volume.go b/pkg/service/volume.go new file mode 100644 index 00000000..89647eef --- /dev/null +++ b/pkg/service/volume.go @@ -0,0 +1,168 @@ +package service + +import ( + "path" + "strings" + + v1 "k8s.io/api/core/v1" 
+ + "github.com/opencurve/curve-operator/pkg/topology" + "github.com/opencurve/curve-operator/pkg/utils" +) + +const ( + DATA_VOLUME = "data-volume" + LOG_VOLUME = "log-volume" +) + +// A DataPathMap is a struct which contains information about where Curve service data is stored in +// containers and whether the data should be persisted to the host. If it is persisted to the host, +// directory on the host where the specific service's data is stored is given. +type DataPathMap struct { + // HostDataDir should be set to the path on the host + // where the specific service's data is stored. + HostDataDir string + + // HostLogDir should be set to the path on the host + // where the specific service's log is stored. + HostLogDir string + + // ContainerDataDir should be set to the path in the container + // where the specific service's data is stored. + ContainerDataDir string + + // ContainerDataDir should be set to the path in the container + // where the specific service's log is stored. + ContainerLogDir string +} + +// NewServiceDataPathMap returns a new DataPathMap for a service which does not utilize a data +// dir in the container as the mon, mgr, osd, mds, and rgw service do. 
+func NewServiceDataPathMap(hostDataDir string, hostLogDir string, containerDataDir string, containerLogDir string) *DataPathMap { + return &DataPathMap{ + HostDataDir: hostDataDir, + HostLogDir: hostLogDir, + ContainerDataDir: containerDataDir, + ContainerLogDir: containerLogDir, + } +} + +// getServiceHostPathVolumeAndMount +func getServiceHostPathVolumeAndMount(dc *topology.DeployConfig) ([]v1.Volume, []v1.VolumeMount) { + layout := dc.GetProjectLayout() + dataPaths := &DataPathMap{ + HostDataDir: dc.GetDataDir(), + HostLogDir: dc.GetLogDir(), + ContainerDataDir: layout.ServiceDataDir, + ContainerLogDir: layout.ServiceLogDir, + } + + // create Data hostpath volume and log hostpath volume + vols, mounts := []v1.Volume{}, []v1.VolumeMount{} + hostPathType := v1.HostPathDirectoryOrCreate + if dataPaths != nil && dataPaths.HostDataDir != "" { + src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir, Type: &hostPathType}} + vols = append(vols, v1.Volume{Name: DATA_VOLUME, VolumeSource: src}) + } + + if dataPaths != nil && dataPaths.HostLogDir != "" { + src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostLogDir, Type: &hostPathType}} + vols = append(vols, v1.Volume{Name: LOG_VOLUME, VolumeSource: src}) + } + + // create data mount path and log mount path on container + if dataPaths != nil && dataPaths.ContainerDataDir != "" { + mounts = append(mounts, v1.VolumeMount{Name: DATA_VOLUME, MountPath: dataPaths.ContainerDataDir}) + } + + if dataPaths != nil && dataPaths.ContainerLogDir != "" { + mounts = append(mounts, v1.VolumeMount{Name: LOG_VOLUME, MountPath: dataPaths.ContainerLogDir}) + } + + return vols, mounts +} + +// getVolumeAndMount Create configmap volume and volumeMount for specified key +func getServiceConfigMapVolumeAndMount(dataKey, mountDir string) (v1.Volume, v1.VolumeMount) { + configMapVolSource := &v1.ConfigMapVolumeSource{} + mode := int32(0644) + subPath := strings.Split(dataKey, "_")[1] + 
configMapVolSource = &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{Name: utils.AFTER_MUTATE_CONF}, + Items: []v1.KeyToPath{{Key: dataKey, Path: subPath, Mode: &mode}}, + } + + volumeName := dataKey + vol := v1.Volume{ + Name: volumeName, + VolumeSource: v1.VolumeSource{ + ConfigMap: configMapVolSource, + }, + } + + vm := v1.VolumeMount{ + Name: volumeName, + ReadOnly: true, // should be no reason to write to the config in pods, so enforce this + MountPath: path.Join(mountDir, subPath), + SubPath: subPath, + } + + return vol, vm +} + +// getToolsAndTopoVolumeAndMount for create-pool job using +func getToolsAndTopoVolumeAndMount(dc *topology.DeployConfig) ([]v1.Volume, []v1.VolumeMount) { + vols, volMounts := []v1.Volume{}, []v1.VolumeMount{} + mode := int32(0644) + subPath := topology.LAYOUT_TOOLS_NAME + + toolVolSource := &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: utils.AFTER_MUTATE_CONF, + }, + Items: []v1.KeyToPath{ + { + Key: topology.LAYOUT_TOOLS_NAME, + Path: subPath, + Mode: &mode, + }, + }, + } + toolConfigVol := v1.Volume{ + Name: utils.AFTER_MUTATE_CONF, + VolumeSource: v1.VolumeSource{ + ConfigMap: toolVolSource, + }, + } + + toolVolMount := v1.VolumeMount{ + Name: utils.AFTER_MUTATE_CONF, + ReadOnly: true, // should be no reason to write to the config in pods, so enforce this + MountPath: dc.GetProjectLayout().ToolsConfSystemPath, + SubPath: subPath, + } + vols = append(vols, toolConfigVol) + volMounts = append(volMounts, toolVolMount) + + topoVolSource := &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: CURVE_TOPOLOGY_CONFIGMAP, + }, + } + topoVol := v1.Volume{ + Name: CURVE_TOPOLOGY_CONFIGMAP, + VolumeSource: v1.VolumeSource{ + ConfigMap: topoVolSource, + }, + } + topoVolMount := v1.VolumeMount{ + Name: CURVE_TOPOLOGY_CONFIGMAP, + ReadOnly: true, + MountPath: dc.GetProjectLayout().ToolsConfDir, + } + + vols = append(vols, topoVol) + volMounts = 
append(volMounts, topoVolMount) + + return vols, volMounts +} diff --git a/pkg/snapshotclone/config.go b/pkg/snapshotclone/config.go deleted file mode 100644 index baa4f78b..00000000 --- a/pkg/snapshotclone/config.go +++ /dev/null @@ -1,51 +0,0 @@ -package snapshotclone - -import "github.com/opencurve/curve-operator/pkg/config" - -// snapConfig implements config.ConfigInterface -var _ config.ConfigInterface = &snapConfig{} - -// snapConfig for a single snap -type snapConfig struct { - Prefix string - ServiceAddr string - ServicePort string - ServiceDummyPort string - ServiceProxyPort string - ClusterEtcdAddr string - ClusterMdsAddr string - - ResourceName string - CurrentConfigMapName string - DaemonID string - DataPathMap *config.DataPathMap -} - -func (c *snapConfig) GetPrefix() string { return c.Prefix } -func (c *snapConfig) GetServiceId() string { return "" } -func (c *snapConfig) GetServiceRole() string { return "" } -func (c *snapConfig) GetServiceHost() string { return "" } -func (c *snapConfig) GetServiceHostSequence() string { return "" } -func (c *snapConfig) GetServiceReplicaSequence() string { return "" } -func (c *snapConfig) GetServiceReplicasSequence() string { return "" } -func (c *snapConfig) GetServiceAddr() string { return c.ServiceAddr } -func (c *snapConfig) GetServicePort() string { return c.ServicePort } -func (c *snapConfig) GetServiceClientPort() string { return "" } -func (c *snapConfig) GetServiceDummyPort() string { return c.ServiceDummyPort } -func (c *snapConfig) GetServiceProxyPort() string { return c.ServiceProxyPort } -func (c *snapConfig) GetServiceExternalAddr() string { return "" } -func (c *snapConfig) GetServiceExternalPort() string { return "" } -func (c *snapConfig) GetLogDir() string { return "" } -func (c *snapConfig) GetDataDir() string { return "" } - -func (c *snapConfig) GetClusterEtcdHttpAddr() string { return "" } -func (c *snapConfig) GetClusterEtcdAddr() string { return c.ClusterEtcdAddr } -func (c *snapConfig) 
GetClusterMdsAddr() string { return c.ClusterMdsAddr } -func (c *snapConfig) GetClusterMdsDummyAddr() string { return "" } -func (c *snapConfig) GetClusterMdsDummyPort() string { return "" } -func (c *snapConfig) GetClusterChunkserverAddr() string { return "" } -func (c *snapConfig) GetClusterMetaserverAddr() string { return "" } -func (c *snapConfig) GetClusterSnapshotcloneAddr() string { return "" } -func (c *snapConfig) GetClusterSnapshotcloneProxyAddr() string { return "" } -func (c *snapConfig) GetClusterSnapshotcloneDummyPort() string { return "" } -func (c *snapConfig) GetClusterSnapshotcloneNginxUpstream() string { return "" } diff --git a/pkg/snapshotclone/snapshotclone.go b/pkg/snapshotclone/snapshotclone.go deleted file mode 100644 index b73a3062..00000000 --- a/pkg/snapshotclone/snapshotclone.go +++ /dev/null @@ -1,172 +0,0 @@ -package snapshotclone - -import ( - "context" - "fmt" - "path" - "strconv" - - "github.com/coreos/pkg/capnslog" - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - curvev1 "github.com/opencurve/curve-operator/api/v1" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/k8sutil" - "github.com/opencurve/curve-operator/pkg/topology" -) - -const ( - AppName = "curve-snapshotclone" - ConfigMapNamePrefix = "curve-snapshotclone-conf" - - // ContainerPath is the mount path of data and log - - Prefix = "/curvebs/snapshotclone" - ContainerDataDir = "/curvebs/snapshotclone/data" - ContainerLogDir = "/curvebs/snapshotclone/logs" -) - -type Cluster struct { - *daemon.Cluster -} - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "snapshotclone") - -func New(c *daemon.Cluster) *Cluster { - return &Cluster{Cluster: c} -} - -// Start Curve snapshotclone daemon -func (c *Cluster) Start(nodesInfo []daemon.NodeInfo, dcs 
[]*topology.DeployConfig) ([]*topology.DeployConfig, error) { - logger.Info("starting snapshotclone server") - - // get clusterEtcdAddr - etcdOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.EtcdOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to get %s configmap from cluster", config.EtcdOverrideConfigMapName) - } - clusterEtcdAddr := etcdOverrideCM.Data[config.ClusterEtcdAddr] - - // get clusterMdsAddr - mdsOverrideCM, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.MdsOverrideConfigMapName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, "failed to get mds override endoints configmap") - } - clusterMdsAddr := mdsOverrideCM.Data[config.MdsOvverideConfigMapDataKey] - - err = c.UpdateSpecRoleAllConfigMap(config.SnapShotCloneAllConfigMapName, config.StartSnapConfigMapDataKey, START, nil) - if err != nil { - return nil, err - } - - var deploymentsToWaitFor []*appsv1.Deployment - for _, node := range nodesInfo { - daemonIDString := k8sutil.IndexToName(node.HostID) - resourceName := fmt.Sprintf("%s-%s", AppName, daemonIDString) - currentConfigMapName := fmt.Sprintf("%s-%s", ConfigMapNamePrefix, daemonIDString) - - snapConfig := &snapConfig{ - Prefix: Prefix, - ServiceAddr: node.NodeIP, - ServicePort: strconv.Itoa(node.SnapshotClonePort), - ServiceDummyPort: strconv.Itoa(node.SnapshotCloneDummyPort), - ServiceProxyPort: strconv.Itoa(node.SnapshotCloneProxyPort), - ClusterEtcdAddr: clusterEtcdAddr, - ClusterMdsAddr: clusterMdsAddr, - - DaemonID: daemonIDString, - ResourceName: resourceName, - CurrentConfigMapName: currentConfigMapName, - DataPathMap: config.NewDaemonDataPathMap( - path.Join(c.DataDirHostPath, fmt.Sprint("snapshotclone-", daemonIDString)), - path.Join(c.LogDirHostPath, fmt.Sprint("snapshotclone-", daemonIDString)), - ContainerDataDir, - ContainerLogDir, - ), - } - - dc := &topology.DeployConfig{ 
- Kind: c.Kind, - Role: config.ROLE_SNAPSHOTCLONE, - NodeName: node.NodeName, - NodeIP: node.NodeIP, - Port: node.SnapshotCloneDummyPort, - ReplicasSequence: node.ReplicasSequence, - Replicas: len(c.Nodes), - StandAlone: node.StandAlone, - } - dcs = append(dcs, dc) - - err = c.prepareConfigMap(snapConfig) - if err != nil { - return nil, err - } - - // make snapshotclone deployment - d, err := c.makeDeployment(node.NodeName, node.NodeIP, snapConfig) - if err != nil { - return nil, err - } - - newDeployment, err := c.Context.Clientset.AppsV1().Deployments(c.NamespacedName.Namespace).Create(d) - if err != nil { - if !kerrors.IsAlreadyExists(err) { - return nil, errors.Wrapf(err, "failed to create snapshotclone deployment %q in cluster", snapConfig.ResourceName) - } - logger.Infof("deployment %v for snapshotclone already exists. updating if needed", snapConfig.ResourceName) - - // TODO:Update the daemon Deployment - // if err := updateDeploymentAndWait(c.Context, c.clusterInfo, d, config.MgrType, mgrConfig.DaemonID, c.spec.SkipUpgradeChecks, false); err != nil { - // logger.Errorf("failed to update mgr deployment %q. %v", resourceName, err) - // } - } else { - logger.Infof("Deployment %q has been created, waiting for startup", newDeployment.GetName()) - deploymentsToWaitFor = append(deploymentsToWaitFor, newDeployment) - } - } - - // wait all Deployments to start - for _, d := range deploymentsToWaitFor { - if err := k8sutil.WaitForDeploymentToStart(context.TODO(), &c.Context, d); err != nil { - return nil, err - } - } - - k8sutil.UpdateStatusCondition(c.Kind, context.TODO(), &c.Context, c.NamespacedName, curvev1.ConditionTypeSnapShotCloneReady, curvev1.ConditionTrue, curvev1.ConditionSnapShotCloneClusterCreatedReason, "Snapshotclone cluster has been created") - - return dcs, nil -} - -// prepareConfigMap -func (c *Cluster) prepareConfigMap(snapConfig *snapConfig) error { - // 1. 
get s3 configmap that must has been created by chunkserver - _, err := c.Context.Clientset.CoreV1().ConfigMaps(c.NamespacedName.Namespace).Get(config.ChunkserverAllConfigMapName, metav1.GetOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to get %s configmap from cluster", config.ChunkserverAllConfigMapName) - } - logger.Infof("check %s configmap has been exist", config.ChunkserverAllConfigMapName) - - // 2. create snap_client.conf configmap - err = c.UpdateSpecRoleAllConfigMap(config.SnapShotCloneAllConfigMapName, config.SnapClientConfigMapDataKey, "", snapConfig) - if err != nil { - return err - } - - // 3. create nginx.conf configmap - err = c.UpdateSpecRoleAllConfigMap(config.SnapShotCloneAllConfigMapName, config.NginxConfigMapDataKey, "", snapConfig) - if err != nil { - return err - } - - // 4. create snapshotclone.conf configmap - err = c.CreateEachConfigMap(config.SnapShotCloneConfigMapDataKey, snapConfig, snapConfig.CurrentConfigMapName) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/snapshotclone/spec.go b/pkg/snapshotclone/spec.go deleted file mode 100644 index 12bde103..00000000 --- a/pkg/snapshotclone/spec.go +++ /dev/null @@ -1,142 +0,0 @@ -package snapshotclone - -import ( - "fmt" - "path" - "strconv" - - "github.com/pkg/errors" - apps "k8s.io/api/apps/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/opencurve/curve-operator/pkg/logrotate" -) - -// makeDeployment make snapshotclone deployment to run snapshotclone daemon -func (c *Cluster) makeDeployment(nodeName string, nodeIP string, snapConfig *snapConfig) (*apps.Deployment, error) { - volumes := SnapDaemonVolumes(snapConfig) - labels := daemon.CephDaemonAppLabels(AppName, c.Namespace, "snapshotclone", snapConfig.DaemonID, c.Kind) - - // add log config volume - logConfCMVolSource := 
&v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "log-conf"}} - volumes = append(volumes, v1.Volume{Name: "log-conf", VolumeSource: v1.VolumeSource{ConfigMap: logConfCMVolSource}}) - - // for debug - // log.Infof("snapConfig %+v", snapConfig) - - runAsUser := int64(0) - runAsNonRoot := false - - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: snapConfig.ResourceName, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - c.makeSnapshotDaemonContainer(nodeIP, snapConfig), - logrotate.MakeLogrotateContainer(), - }, - NodeName: nodeName, - RestartPolicy: v1.RestartPolicyAlways, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - SecurityContext: &v1.PodSecurityContext{ - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - }, - }, - } - - replicas := int32(1) - - d := &apps.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: snapConfig.ResourceName, - Namespace: c.NamespacedName.Namespace, - Labels: labels, - }, - Spec: apps.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: labels, - }, - Template: podSpec, - Replicas: &replicas, - Strategy: apps.DeploymentStrategy{ - Type: apps.RecreateDeploymentStrategyType, - }, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(d) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to chunkserver deployment %q", d.Name) - } - - return d, nil -} - -// makeSnapshotDaemonContainer create snapshotclone container -func (c *Cluster) makeSnapshotDaemonContainer(nodeIP string, snapConfig *snapConfig) v1.Container { - privileged := true - runAsUser := int64(0) - runAsNonRoot := false - readOnlyRootFilesystem := false - - argsNginxConf := path.Join(config.NginxConfigMapMountPath, config.NginxConfigMapDataKey) - configFileMountPath := path.Join(config.SnapShotCloneConfigMapMountPath, config.SnapShotCloneConfigMapDataKey) - argsConfigFileDir := 
fmt.Sprintf("--conf=%s", configFileMountPath) - - port, _ := strconv.Atoi(snapConfig.ServicePort) - dummyPort, _ := strconv.Atoi(snapConfig.ServiceDummyPort) - proxyPort, _ := strconv.Atoi(snapConfig.ServiceProxyPort) - - container := v1.Container{ - Name: "snapshotclone", - Command: []string{ - "/bin/bash", - config.StartSnapConfigMapMountPath, - }, - Args: []string{ - argsNginxConf, - argsConfigFileDir, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: SnapDaemonVolumeMounts(snapConfig), - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - }, - Ports: []v1.ContainerPort{ - { - Name: "listen-port", - ContainerPort: int32(port), - HostPort: int32(port), - Protocol: v1.ProtocolTCP, - }, - { - Name: "dummy-port", - ContainerPort: int32(dummyPort), - HostPort: int32(dummyPort), - Protocol: v1.ProtocolTCP, - }, - { - Name: "proxy-port", - ContainerPort: int32(proxyPort), - HostPort: int32(proxyPort), - Protocol: v1.ProtocolTCP, - }, - }, - Env: []v1.EnvVar{{Name: "TZ", Value: "Asia/Hangzhou"}}, - } - - return container -} diff --git a/pkg/snapshotclone/start.go b/pkg/snapshotclone/start.go deleted file mode 100644 index 096a240a..00000000 --- a/pkg/snapshotclone/start.go +++ /dev/null @@ -1,18 +0,0 @@ -package snapshotclone - -var START = ` - -argsNginxConf=$1 -argsConfigFileDir=$2 - -/usr/sbin/nginx -c $argsNginxConf - -# sleep 5 second to wait nginx startup -sleep 10 - -# for test -#while true; do echo hello; sleep 10;done - -cd /curvebs/snapshotclone/sbin -./curvebs-snapshotclone $2 -` diff --git a/pkg/snapshotclone/volume.go b/pkg/snapshotclone/volume.go deleted file mode 100644 index 1f73353d..00000000 --- a/pkg/snapshotclone/volume.go +++ /dev/null @@ -1,164 +0,0 @@ -package snapshotclone - -import ( - "path" - - v1 "k8s.io/api/core/v1" - - "github.com/opencurve/curve-operator/pkg/config" 
-) - -// SnapDaemonVolumes returns the pod volumes used only by snapshotclone -func SnapDaemonVolumes(snapConfig *snapConfig) []v1.Volume { - vols := []v1.Volume{} - // create configmap volume - configMapVolumes, _ := SnapConfigMapVolumeAndMount(snapConfig) - vols = append(vols, configMapVolumes...) - - hostPathType := v1.HostPathDirectoryOrCreate - // create data volume - src := v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: snapConfig.DataPathMap.HostDataDir, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: "data-volume", VolumeSource: src}) - - // create log volume - src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: snapConfig.DataPathMap.HostLogDir, Type: &hostPathType}} - vols = append(vols, v1.Volume{Name: "log-volume", VolumeSource: src}) - - return vols -} - -// SnapDaemonVolumeMounts returns the pod container volume mounth used only by chunkserver -func SnapDaemonVolumeMounts(snapConfig *snapConfig) []v1.VolumeMount { - mounts := []v1.VolumeMount{} - - // create configmap mount path - _, configMapMounts := SnapConfigMapVolumeAndMount(snapConfig) - mounts = append(mounts, configMapMounts...) 
- - // create data mount path and log mount path on container - // create data mount path and log mount path on container - mounts = append(mounts, v1.VolumeMount{Name: "data-volume", MountPath: snapConfig.DataPathMap.ContainerDataDir}) - mounts = append(mounts, v1.VolumeMount{Name: "log-volume", MountPath: snapConfig.DataPathMap.ContainerLogDir}) - - return mounts -} - -// SnapConfigMapVolumeAndMount Create configmap volume and volume mount for daemon chunkserver pod -func SnapConfigMapVolumeAndMount(snapConfig *snapConfig) ([]v1.Volume, []v1.VolumeMount) { - vols := []v1.Volume{} - mounts := []v1.VolumeMount{} - - // nginx.conf - mode := int32(0644) - nginxConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: config.SnapShotCloneAllConfigMapName}, - Items: []v1.KeyToPath{{Key: config.NginxConfigMapDataKey, Path: config.NginxConfigMapDataKey, Mode: &mode}}, - } - nginxConfigVol := v1.Volume{ - Name: "nginx-conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: nginxConfigMapVolSource, - }, - } - vols = append(vols, nginxConfigVol) - - // start_snap.sh - startSnapConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: config.SnapShotCloneAllConfigMapName}, - Items: []v1.KeyToPath{{Key: config.StartSnapConfigMapDataKey, Path: config.StartSnapConfigMapDataKey, Mode: &mode}}, - } - startSnapConfigVol := v1.Volume{ - Name: "start-snapshot", - VolumeSource: v1.VolumeSource{ - ConfigMap: startSnapConfigMapVolSource, - }, - } - vols = append(vols, startSnapConfigVol) - - // snap_client.conf - snapClientConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: config.SnapShotCloneAllConfigMapName}, - Items: []v1.KeyToPath{{Key: config.SnapClientConfigMapDataKey, Path: config.SnapClientConfigMapDataKey, Mode: &mode}}, - } - snapClientConfigVol := v1.Volume{ - Name: "snap-client-conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: 
snapClientConfigMapVolSource, - }, - } - vols = append(vols, snapClientConfigVol) - - // s3.conf - S3ConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: config.SnapShotCloneAllConfigMapName}, - Items: []v1.KeyToPath{{Key: config.S3ConfigMapDataKey, Path: config.S3ConfigMapDataKey, Mode: &mode}}, - } - S3ConfigVol := v1.Volume{ - Name: "s3-conf", - VolumeSource: v1.VolumeSource{ - ConfigMap: S3ConfigMapVolSource, - }, - } - vols = append(vols, S3ConfigVol) - - // snapshotclone.conf - snapShotConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{Name: snapConfig.CurrentConfigMapName}, - Items: []v1.KeyToPath{{Key: config.SnapShotCloneConfigMapDataKey, Path: config.SnapShotCloneConfigMapDataKey, Mode: &mode}}, - } - configVol := v1.Volume{ - Name: snapConfig.CurrentConfigMapName, - VolumeSource: v1.VolumeSource{ - ConfigMap: snapShotConfigMapVolSource, - }, - } - vols = append(vols, configVol) - - // nginx.conf volumeMount - nginxMountPath := v1.VolumeMount{ - Name: "nginx-conf", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.NginxConfigMapMountPath, config.NginxConfigMapDataKey), - SubPath: config.NginxConfigMapDataKey, - } - mounts = append(mounts, nginxMountPath) - - // start_snap.sh volumeMount - startSnapMountPath := v1.VolumeMount{ - Name: "start-snapshot", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: config.StartSnapConfigMapMountPath, - SubPath: config.StartSnapConfigMapDataKey, - } - mounts = append(mounts, startSnapMountPath) - - // snap_client.conf volumeMount - snapClientMountPath := v1.VolumeMount{ - Name: "snap-client-conf", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.SnapClientConfigMapMountPath, config.SnapClientConfigMapDataKey), - SubPath: 
config.SnapClientConfigMapDataKey, - } - mounts = append(mounts, snapClientMountPath) - - // s3.conf volumeMount - S3MountPath := v1.VolumeMount{ - Name: "s3-conf", - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.S3ConfigMapMountSnapPathDir, config.S3ConfigMapDataKey), - SubPath: config.S3ConfigMapDataKey, - } - mounts = append(mounts, S3MountPath) - - // snapshotclone volume mount path - m := v1.VolumeMount{ - Name: snapConfig.CurrentConfigMapName, - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: path.Join(config.SnapShotCloneConfigMapMountPath, config.SnapShotCloneConfigMapDataKey), - SubPath: config.SnapShotCloneConfigMapDataKey, - } - - mounts = append(mounts, m) - - return vols, mounts -} diff --git a/pkg/topology/const.go b/pkg/topology/const.go new file mode 100644 index 00000000..ddd8fd23 --- /dev/null +++ b/pkg/topology/const.go @@ -0,0 +1,28 @@ +package topology + +const ( + KIND_CURVEBS = "curvebs" + KIND_CURVEFS = "curvefs" +) + +const ( + ROLE_ETCD = "etcd" + ROLE_MDS = "mds" + ROLE_CHUNKSERVER = "chunkserver" + ROLE_SNAPSHOTCLONE = "snapshotclone" + ROLE_METASERVER = "metaserver" +) + +var ( + CURVEBS_ROLES = []string{ + ROLE_ETCD, + ROLE_MDS, + ROLE_CHUNKSERVER, + ROLE_SNAPSHOTCLONE, + } + CURVEFS_ROLES = []string{ + ROLE_ETCD, + ROLE_MDS, + ROLE_METASERVER, + } +) diff --git a/pkg/topology/dc.go b/pkg/topology/dc.go index b1d92002..761f13a5 100644 --- a/pkg/topology/dc.go +++ b/pkg/topology/dc.go @@ -1,24 +1,396 @@ package topology import ( - "github.com/opencurve/curve-operator/pkg/clusterd" - v1 "k8s.io/api/core/v1" + "fmt" + "path" + + "github.com/opencurve/curve-operator/pkg/utils" +) + +const ( + // service project layout + LAYOUT_CURVEFS_ROOT_DIR = "/curvefs" + LAYOUT_CURVEBS_ROOT_DIR = "/curvebs" + LAYOUT_PLAYGROUND_ROOT_DIR = "playground" + LAYOUT_CONF_SRC_DIR = "/conf" + LAYOUT_SERVICE_BIN_DIR = "/sbin" + 
LAYOUT_SERVICE_CONF_DIR = "/conf" + LAYOUT_SERVICE_LOG_DIR = "/logs" + LAYOUT_SERVICE_DATA_DIR = "/data" + LAYOUT_TOOLS_DIR = "/tools" + LAYOUT_TOOLS_V2_DIR = "/tools-v2" + LAYOUT_CURVEBS_CHUNKFILE_POOL_DIR = "chunkfilepool" + LAYOUT_CURVEBS_COPYSETS_DIR = "copysets" + LAYOUT_CURVEBS_RECYCLER_DIR = "recycler" + LAYOUT_CURVEBS_TOOLS_CONFIG_SYSTEM_PATH = "/etc/curve/tools.conf" + LAYOUT_CURVEFS_TOOLS_CONFIG_SYSTEM_PATH = "/etc/curvefs/tools.conf" + LAYOUT_CURVE_TOOLS_V2_CONFIG_SYSTEM_PATH = "/etc/curve/curve.yaml" + LAYOUT_CORE_SYSTEM_DIR = "/core" + LAYOUT_TOOLS_NAME = "tools.conf" + + BINARY_CURVEBS_TOOL = "curvebs-tool" + BINARY_CURVEBS_FORMAT = "curve_format" + BINARY_CURVEFS_TOOL = "curvefs_tool" + BINARY_CURVE_TOOL_V2 = "curve" + METAFILE_CHUNKFILE_POOL = "chunkfilepool.meta" + METAFILE_CHUNKSERVER_ID = "chunkserver.dat" +) + +const ( + DEFAULT_CONFIG_DELIMITER = "=" + ETCD_CONFIG_DELIMITER = ": " +) + +var ( + DefaultCurveBSDeployConfig = &DeployConfig{kind: KIND_CURVEBS} + DefaultCurveFSDeployConfig = &DeployConfig{kind: KIND_CURVEFS} + + ServiceConfigs = map[string][]string{ + ROLE_ETCD: {"etcd.conf"}, + ROLE_MDS: {"mds.conf"}, + ROLE_CHUNKSERVER: {"chunkserver.conf", "cs_client.conf", "s3.conf"}, + ROLE_SNAPSHOTCLONE: {"snapshotclone.conf", "snap_client.conf", "s3.conf", "nginx.conf"}, + ROLE_METASERVER: {"metaserver.conf"}, + } ) type DeployConfig struct { - Ctx clusterd.Context - Namespace string - Image string - ImagePullPolicy v1.PullPolicy - Kind string - Role string - Copysets int - NodeName string - NodeIP string - Port int - DeviceName string - HostSequence int - ReplicasSequence int - Replicas int - StandAlone bool + kind string + id string + parentId string + name string + role string + host string + hostIp string + hostSequence int + instances int // replicas number + instancesSequence int + + variables *Variables + config map[string]string + serviceConfig map[string]string +} + +func (dc *DeployConfig) get(i *item) interface{} { + if v, ok := 
dc.config[i.key]; ok { + return v + } + + return nil +} + +func (dc *DeployConfig) getString(i *item) string { + v := dc.get(i) + if v == nil { + return "" + } + return v.(string) +} + +func (dc *DeployConfig) getInt(i *item) int { + v := dc.get(i) + if v == nil { + return 0 + } + return v.(int) +} + +func (dc *DeployConfig) getBool(i *item) bool { + v := dc.get(i) + if v == nil { + return false + } + return v.(bool) +} + +// 1. config property +func (dc *DeployConfig) GetKind() string { return dc.kind } +func (dc *DeployConfig) GetId() string { return dc.id } +func (dc *DeployConfig) GetParentId() string { return dc.parentId } +func (dc *DeployConfig) GetName() string { return dc.name } +func (dc *DeployConfig) GetRole() string { return dc.role } +func (dc *DeployConfig) GetHost() string { return dc.host } +func (dc *DeployConfig) GetHostIp() string { return dc.hostIp } +func (dc *DeployConfig) GetInstances() int { return dc.instances } +func (dc *DeployConfig) GetHostSequence() int { return dc.hostSequence } +func (dc *DeployConfig) GetInstancesSequence() int { return dc.instancesSequence } +func (dc *DeployConfig) GetVariables() *Variables { return dc.variables } +func (dc *DeployConfig) GetConfig() map[string]string { return dc.config } +func (dc *DeployConfig) GetServiceConfig() map[string]string { return dc.serviceConfig } + +// 2. 
config item +func (dc *DeployConfig) GetPrefix() string { + if dc.GetKind() == KIND_CURVEBS { + return path.Join(LAYOUT_CURVEBS_ROOT_DIR, dc.GetRole()) + } + return path.Join(LAYOUT_CURVEFS_ROOT_DIR, dc.GetRole()) +} +func (dc *DeployConfig) GetContainerImage() string { return dc.getString(CONFIG_CONTAINER_IMAGE) } +func (dc *DeployConfig) GetDataDir() string { return dc.getString(CONFIG_DATA_DIR) } +func (dc *DeployConfig) GetLogDir() string { return dc.getString(CONFIG_LOG_DIR) } +func (dc *DeployConfig) GetCopysets() int { return dc.getInt(CONFIG_COPYSETS) } +func (dc *DeployConfig) GetListenPort() int { return dc.getInt(CONFIG_LISTEN_PORT) } +func (dc *DeployConfig) GetListenClientPort() int { return dc.getInt(CONFIG_LISTEN_CLIENT_PORT) } +func (dc *DeployConfig) GetListenDummyPort() int { return dc.getInt(CONFIG_LISTEN_DUMMY_PORT) } +func (dc *DeployConfig) GetListenProxyPort() int { return dc.getInt(CONFIG_LISTEN_PROXY_PORT) } +func (dc *DeployConfig) GetListenExternalIp() string { return dc.getString(CONFIG_LISTEN_EXTERNAL_IP) } +func (dc *DeployConfig) GetListenExternalPort() int { + if dc.GetEnableExternalServer() { + return dc.getInt(CONFIG_LISTEN_EXTERNAL_PORT) + } + return dc.GetListenPort() +} +func (dc *DeployConfig) GetEnableExternalServer() bool { + return dc.getBool(CONFIG_ENABLE_EXTERNAL_SERVER) +} + +func (dc *DeployConfig) GetConfigKvFilter() string { + switch dc.GetRole() { + case ROLE_ETCD: + return ETCD_CONFIG_DELIMITER + default: + return DEFAULT_CONFIG_DELIMITER + } +} + +func (dc *DeployConfig) convert() error { + for k, v := range dc.config { + item := itemset.get(k) + if item == nil || !item.exclude { + dc.serviceConfig[k] = v + } + } + + // convret config item to its require type, + // return error if convert failed + // for _, item := range itemset.getAll() { + // k := item.key + // value := dc.get(item) // return config value or default value + // if value == nil { + // continue + // } + // v, ok := utils.All2Str(value) + // if !ok { 
+ // return errors.New("Unknown type") + // } + + // switch item.require { + // case REQUIRE_ANY: + // // do nothing + // case REQUIRE_INT: + // if intv, ok := utils.Str2Int(v); !ok { + // return errors.New("configure value require integer") + // } else { + // dc.config[k] = intv + // } + // case REQUIRE_STRING: + // if len(v) == 0 { + // return errors.New("configure value require string") + // } + // case REQUIRE_BOOL: + // if boolv, ok := utils.Str2Bool(v); !ok { + // return errors.New("configure value require bool") + // } else { + // dc.config[k] = boolv + // } + // case REQUIRE_POSITIVE_INTEGER: + // if intv, ok := utils.Str2Int(v); !ok { + // return errors.New("configure value require positive integer") + // } else if intv <= 0 { + // return errors.New("configure value require negative integer") + // } else { + // dc.config[k] = intv + // } + // } + // } + return nil +} + +// 3: service project layout +/* /curvebs + * ├── conf + * │ ├── chunkserver.conf + * │ ├── etcd.conf + * │ ├── mds.conf + * │ └── tools.conf + * ├── etcd + * │ ├── conf + * │ ├── data + * │ ├── log + * │ └── sbin + * ├── mds + * │ ├── conf + * │ ├── data + * │ ├── log + * │ └── sbin + * ├── chunkserver + * │ ├── conf + * │ ├── data + * │ ├── log + * │ └── sbin + * ├── snapshotclone + * │ ├── conf + * │ ├── data + * │ ├── log + * │ └── sbin + * └── tools + * ├── conf + * ├── data + * ├── log + * └── sbin + */ +type ( + ConfFile struct { + Name string + Path string + SourcePath string + } + + Layout struct { + // project: curvebs/curvefs + ProjectRootDir string // /curvebs + + PlaygroundRootDir string // /curvebs/playground + + // service + ServiceRootDir string // /curvebs/mds + ServiceBinDir string // /curvebs/mds/sbin + ServiceBinPath string // /curvebs/mds/sbin/mds + ServiceLogDir string // /curvebs/mds/logs + ServiceDataDir string // /curvebs/mds/data + ServiceConfDir string // /curvebs/mds/conf + ServiceConfPath string // /curvebs/mds/conf/mds.conf + ServiceConfSrcDir string // 
/curvebs/conf + ServiceConfSrcPath string // /curvebs/conf/mds.conf + ServiceConfFiles []ConfFile + + // tools + ToolsRootDir string // /curvebs/tools + ToolsBinDir string // /curvebs/tools/sbin + ToolsDataDir string // /curvebs/tools/data + ToolsConfDir string // /curvebs/tools/conf + ToolsConfPath string // /curvebs/tools/conf/tools.conf + ToolsConfSrcPath string // /curvebs/conf/tools.conf + ToolsConfSystemPath string // /etc/curve/tools.conf + ToolsBinaryPath string // /curvebs/tools/sbin/curvebs-tool + + // tools-v2 + ToolsV2ConfSrcPath string // /curvebs/conf/curve.yaml + ToolsV2ConfSystemPath string // /etc/curve/curve.yaml + ToolsV2BinaryPath string // /curvebs/tools-v2/sbin/curve + + // format + FormatBinaryPath string // /curvebs/tools/sbin/curve_format + ChunkfilePoolRootDir string // /curvebs/chunkserver/data + ChunkfilePoolDir string // /curvebs/chunkserver/data/chunkfilepool + ChunkfilePoolMetaPath string // /curvebs/chunkserver/data/chunkfilepool.meta + + // core + CoreSystemDir string + } +) + +func (dc *DeployConfig) GetProjectLayout() Layout { + kind := dc.GetKind() + role := dc.GetRole() + // project + root := utils.Choose(kind == KIND_CURVEBS, LAYOUT_CURVEBS_ROOT_DIR, LAYOUT_CURVEFS_ROOT_DIR) + + // service + confSrcDir := root + LAYOUT_CONF_SRC_DIR + serviceRootDir := dc.GetPrefix() + serviceConfDir := fmt.Sprintf("%s/conf", serviceRootDir) + serviceConfFiles := []ConfFile{} + for _, item := range ServiceConfigs[role] { + serviceConfFiles = append(serviceConfFiles, ConfFile{ + Name: item, + Path: fmt.Sprintf("%s/%s", serviceConfDir, item), + SourcePath: fmt.Sprintf("%s/%s", confSrcDir, item), + }) + } + + // tools + toolsRootDir := root + LAYOUT_TOOLS_DIR + toolsBinDir := toolsRootDir + LAYOUT_SERVICE_BIN_DIR + toolsConfDir := toolsRootDir + LAYOUT_SERVICE_CONF_DIR + toolsBinaryName := utils.Choose(kind == KIND_CURVEBS, BINARY_CURVEBS_TOOL, BINARY_CURVEFS_TOOL) + toolsConfSystemPath := utils.Choose(kind == KIND_CURVEBS, + 
LAYOUT_CURVEBS_TOOLS_CONFIG_SYSTEM_PATH, + LAYOUT_CURVEFS_TOOLS_CONFIG_SYSTEM_PATH) + + // tools-v2 + toolsV2RootDir := root + LAYOUT_TOOLS_V2_DIR + toolsV2BinDir := toolsV2RootDir + LAYOUT_SERVICE_BIN_DIR + toolsV2BinaryName := BINARY_CURVE_TOOL_V2 + toolsV2ConfSystemPath := LAYOUT_CURVE_TOOLS_V2_CONFIG_SYSTEM_PATH + + // format + chunkserverDataDir := fmt.Sprintf("%s/%s%s", root, ROLE_CHUNKSERVER, LAYOUT_SERVICE_DATA_DIR) + + return Layout{ + // project + ProjectRootDir: root, + + // playground + PlaygroundRootDir: path.Join(root, LAYOUT_PLAYGROUND_ROOT_DIR), + + // service + ServiceRootDir: serviceRootDir, + ServiceBinDir: serviceRootDir + LAYOUT_SERVICE_BIN_DIR, + ServiceBinPath: fmt.Sprintf("%s/%s", serviceRootDir+LAYOUT_SERVICE_BIN_DIR, role), + ServiceLogDir: serviceRootDir + LAYOUT_SERVICE_LOG_DIR, + ServiceDataDir: serviceRootDir + LAYOUT_SERVICE_DATA_DIR, + ServiceConfDir: serviceRootDir + LAYOUT_SERVICE_CONF_DIR, + ServiceConfPath: fmt.Sprintf("%s/%s.conf", serviceConfDir, role), + ServiceConfSrcDir: confSrcDir, + ServiceConfSrcPath: fmt.Sprintf("%s/%s.conf", confSrcDir, role), + ServiceConfFiles: serviceConfFiles, + + // tools + ToolsRootDir: toolsRootDir, + ToolsBinDir: toolsRootDir + LAYOUT_SERVICE_BIN_DIR, + ToolsDataDir: toolsRootDir + LAYOUT_SERVICE_DATA_DIR, + ToolsConfDir: toolsRootDir + LAYOUT_SERVICE_CONF_DIR, + ToolsConfPath: fmt.Sprintf("%s/tools.conf", toolsConfDir), + ToolsConfSrcPath: fmt.Sprintf("%s/tools.conf", confSrcDir), + ToolsConfSystemPath: toolsConfSystemPath, + ToolsBinaryPath: fmt.Sprintf("%s/%s", toolsBinDir, toolsBinaryName), + + // toolsv2 + ToolsV2ConfSrcPath: fmt.Sprintf("%s/curve.yaml", confSrcDir), + ToolsV2ConfSystemPath: toolsV2ConfSystemPath, + ToolsV2BinaryPath: fmt.Sprintf("%s/%s", toolsV2BinDir, toolsV2BinaryName), + + // format + FormatBinaryPath: fmt.Sprintf("%s/%s", toolsBinDir, BINARY_CURVEBS_FORMAT), + ChunkfilePoolRootDir: chunkserverDataDir, + ChunkfilePoolDir: fmt.Sprintf("%s/%s", chunkserverDataDir, 
LAYOUT_CURVEBS_CHUNKFILE_POOL_DIR), + ChunkfilePoolMetaPath: fmt.Sprintf("%s/%s", chunkserverDataDir, METAFILE_CHUNKFILE_POOL), + + // core + CoreSystemDir: LAYOUT_CORE_SYSTEM_DIR, + } +} + +func GetProjectLayout(kind, role string) Layout { + dc := DeployConfig{kind: kind, role: role} + return dc.GetProjectLayout() +} + +func GetCurveBSProjectLayout() Layout { + return DefaultCurveBSDeployConfig.GetProjectLayout() +} + +func GetCurveFSProjectLayout() Layout { + return DefaultCurveFSDeployConfig.GetProjectLayout() +} + +func FilterDeployConfigByRole(dcs []*DeployConfig, role string) []*DeployConfig { + role2DeployConfigs := []*DeployConfig{} + for _, dc := range dcs { + if dc.GetRole() == role { + role2DeployConfigs = append(role2DeployConfigs, dc) + } + } + return role2DeployConfigs } diff --git a/pkg/topology/dc_item.go b/pkg/topology/dc_item.go new file mode 100644 index 00000000..3673ede3 --- /dev/null +++ b/pkg/topology/dc_item.go @@ -0,0 +1,237 @@ +package topology + +import "path" + +const ( + REQUIRE_ANY = iota + REQUIRE_INT + REQUIRE_STRING + REQUIRE_BOOL + REQUIRE_POSITIVE_INTEGER + + // default value + DEFAULT_REPORT_USAGE = true + DEFAULT_CURVEBS_CONTAINER_IMAGE = "opencurvedocker/curvebs:latest" + DEFAULT_CURVEFS_CONTAINER_IMAGE = "opencurvedocker/curvefs:latest" + DEFAULT_ETCD_LISTEN_PEER_PORT = 2380 + DEFAULT_ETCD_LISTEN_CLIENT_PORT = 2379 + DEFAULT_MDS_LISTEN_PORT = 6700 + DEFAULT_MDS_LISTEN_DUMMY_PORT = 7700 + DEFAULT_CHUNKSERVER_LISTN_PORT = 8200 + DEFAULT_SNAPSHOTCLONE_LISTEN_PORT = 5555 + DEFAULT_SNAPSHOTCLONE_LISTEN_DUMMY_PORT = 8081 + DEFAULT_SNAPSHOTCLONE_LISTEN_PROXY_PORT = 8080 + DEFAULT_METASERVER_LISTN_PORT = 6800 + DEFAULT_METASERVER_LISTN_EXTARNAL_PORT = 7800 + DEFAULT_ENABLE_EXTERNAL_SERVER = false + DEFAULT_CHUNKSERVER_COPYSETS = 100 // copysets per chunkserver + DEFAULT_METASERVER_COPYSETS = 100 // copysets per metaserver +) + +type ( + // config item + item struct { + key string + require int + exclude bool // exclude for service 
config + defaultValue interface{} // nil means no default value + } + + itemSet struct { + items []*item + key2item map[string]*item + } +) + +// you should add config item to itemset iff you want to: +// (1) check the configuration item value, like type, valid value OR +// (2) filter out the configuration item for service config OR +// (3) set the default value for configuration item +var ( + itemset = &itemSet{ + items: []*item{}, + key2item: map[string]*item{}, + } + + CONFIG_PREFIX = itemset.insert( + "Prefix", + REQUIRE_STRING, + true, + func(dc *DeployConfig) interface{} { + if dc.GetKind() == KIND_CURVEBS { + return path.Join(LAYOUT_CURVEBS_ROOT_DIR, dc.GetRole()) + } + return path.Join(LAYOUT_CURVEFS_ROOT_DIR, dc.GetRole()) + }, + ) + + CONFIG_CONTAINER_IMAGE = itemset.insert( + "ContainerImage", + REQUIRE_STRING, + true, + func(dc *DeployConfig) interface{} { + if dc.GetKind() == KIND_CURVEBS { + return DEFAULT_CURVEBS_CONTAINER_IMAGE + } + return DEFAULT_CURVEFS_CONTAINER_IMAGE + }, + ) + + CONFIG_LOG_DIR = itemset.insert( + "LogDir", + REQUIRE_STRING, + true, + nil, + ) + + CONFIG_DATA_DIR = itemset.insert( + "DataDir", + REQUIRE_STRING, + true, + nil, + ) + + CONFIG_CORE_DIR = itemset.insert( + "CoreDir", + REQUIRE_STRING, + true, + nil, + ) + + CONFIG_LISTEN_PORT = itemset.insert( + "Port", + REQUIRE_POSITIVE_INTEGER, + true, + func(dc *DeployConfig) interface{} { + switch dc.GetRole() { + case ROLE_ETCD: + return DEFAULT_ETCD_LISTEN_PEER_PORT + case ROLE_MDS: + return DEFAULT_MDS_LISTEN_PORT + case ROLE_CHUNKSERVER: + return DEFAULT_CHUNKSERVER_LISTN_PORT + case ROLE_SNAPSHOTCLONE: + return DEFAULT_SNAPSHOTCLONE_LISTEN_PORT + case ROLE_METASERVER: + return DEFAULT_METASERVER_LISTN_PORT + } + return nil + }, + ) + + CONFIG_LISTEN_CLIENT_PORT = itemset.insert( + "ClientPort", + REQUIRE_POSITIVE_INTEGER, + true, + DEFAULT_ETCD_LISTEN_CLIENT_PORT, + ) + + CONFIG_LISTEN_DUMMY_PORT = itemset.insert( + "DummyPort", + REQUIRE_POSITIVE_INTEGER, + true, + 
func(dc *DeployConfig) interface{} { + switch dc.GetRole() { + case ROLE_MDS: + return DEFAULT_MDS_LISTEN_DUMMY_PORT + case ROLE_SNAPSHOTCLONE: + return DEFAULT_SNAPSHOTCLONE_LISTEN_DUMMY_PORT + } + return nil + }, + ) + + CONFIG_LISTEN_PROXY_PORT = itemset.insert( + "ProxyPort", + REQUIRE_POSITIVE_INTEGER, + true, + DEFAULT_SNAPSHOTCLONE_LISTEN_PROXY_PORT, + ) + + CONFIG_LISTEN_EXTERNAL_IP = itemset.insert( + "ExternalIp", + REQUIRE_STRING, + true, + func(dc *DeployConfig) interface{} { + return dc.GetHost() + }, + ) + + CONFIG_LISTEN_EXTERNAL_PORT = itemset.insert( + "ExternalPort", + REQUIRE_POSITIVE_INTEGER, + true, + func(dc *DeployConfig) interface{} { + if dc.GetRole() == ROLE_METASERVER { + return DEFAULT_METASERVER_LISTN_EXTARNAL_PORT + } + return dc.GetListenPort() + }, + ) + + CONFIG_ENABLE_EXTERNAL_SERVER = itemset.insert( + "global.enable_external_server", + REQUIRE_BOOL, + false, + DEFAULT_ENABLE_EXTERNAL_SERVER, + ) + + CONFIG_COPYSETS = itemset.insert( + "Copysets", + REQUIRE_POSITIVE_INTEGER, + true, + func(dc *DeployConfig) interface{} { + if dc.GetRole() == ROLE_CHUNKSERVER { + return DEFAULT_CHUNKSERVER_COPYSETS + } + return DEFAULT_METASERVER_COPYSETS + }, + ) + + CONFIG_S3_ACCESS_KEY = itemset.insert( + "s3.ak", + REQUIRE_STRING, + false, + nil, + ) + + CONFIG_S3_SECRET_KEY = itemset.insert( + "s3.sk", + REQUIRE_STRING, + false, + nil, + ) + + CONFIG_S3_ADDRESS = itemset.insert( + "s3.nos_address", + REQUIRE_STRING, + false, + nil, + ) + + CONFIG_S3_BUCKET_NAME = itemset.insert( + "s3.snapshot_bucket_name", + REQUIRE_STRING, + false, + nil, + ) +) + +func (i *item) Key() string { + return i.key +} + +func (itemset *itemSet) insert(key string, require int, exclude bool, defaultValue interface{}) *item { + i := &item{key, require, exclude, defaultValue} + itemset.key2item[key] = i + itemset.items = append(itemset.items, i) + return i +} + +func (itemset *itemSet) get(key string) *item { + return itemset.key2item[key] +} + +// func (itemset 
*itemSet) getAll() []*item { +// return itemset.items +// } diff --git a/pkg/topology/pool.go b/pkg/topology/pool.go deleted file mode 100644 index e976bd8a..00000000 --- a/pkg/topology/pool.go +++ /dev/null @@ -1,209 +0,0 @@ -package topology - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/opencurve/curve-operator/pkg/config" -) - -const ( - ROLE_CHUNKSERVER = "chunkserver" - ROLE_METASERVER = "metaserver" - - DEFAULT_CHUNKSERVER_COPYSETS = 100 - DEFAULT_REPLICAS_PER_COPYSET = 3 - DEFAULT_ZONES_PER_POOL = 3 - DEFAULT_TYPE = 0 - DEFAULT_SCATTER_WIDTH = 0 -) - -/* - * curvebs_cluster_topo: - * servers: - * - name: server1 - * internalip: 127.0.0.1 - * internalport: 16701 - * externalip: 127.0.0.1 - * externalport: 16701 - * zone: zone1 - * physicalpool: pool1 - * ... - * logicalpools: - * - name: pool1 - * physicalpool: pool1 - * replicasnum: 3 - * copysetnum: 100 - * zonenum: 3 - * type: 0 - * scatterwidth: 0 - * ... - * - * - * curvefs_cluster_topo: - * servers: - * - name: server1 - * internalip: 127.0.0.1 - * internalport: 16701 - * externalip: 127.0.0.1 - * externalport: 16701 - * zone: zone1 - * pool: pool1 - * ... 
- * pools: - * - name: pool1 - * replicasnum: 3 - * copysetnum: 100 - * zonenum: 3 - */ - -type ( - LogicalPool struct { - Name string `json:"name"` - Replicas int `json:"replicasnum"` - Zones int `json:"zonenum"` - Copysets int `json:"copysetnum"` - Type int `json:"type"` // curvebs - ScatterWidth int `json:"scatterwidth"` // curvebs - PhysicalPool string `json:"physicalpool"` // curvebs - } - - Server struct { - Name string `json:"name"` - InternalIp string `json:"internalip"` - InternalPort int `json:"internalport"` - ExternalIp string `json:"externalip"` - ExternalPort int `json:"externalport"` - Zone string `json:"zone"` - PhysicalPool string `json:"physicalpool,omitempty"` // curvebs - Pool string `json:"pool,omitempty"` // curvefs - } - - CurveClusterTopo struct { - Servers []Server `json:"servers"` - LogicalPools []LogicalPool `json:"logicalpools,omitempty"` // curvebs - Pools []LogicalPool `json:"pools,omitempty"` // curvefs - NPools int `json:"npools"` - } -) - -func genNextZone(zones int) func() string { - idx := 0 - return func() string { - idx++ - return fmt.Sprintf("zone%d", (idx-1)%zones+1) - } -} - -func formatName(dc *DeployConfig) string { - return fmt.Sprintf("%s_%d", dc.NodeName, dc.ReplicasSequence) -} - -// SortDeployConfigs we should sort the "dcs" for generate correct zone number -func SortDeployConfigs(dcs []*DeployConfig) { - sort.Slice(dcs, func(i, j int) bool { - dc1, dc2 := dcs[i], dcs[j] - if dc1.Role == dc2.Role { - if dc1.HostSequence == dc2.HostSequence { - return dc1.ReplicasSequence < dc2.ReplicasSequence - } - return dc1.HostSequence < dc2.HostSequence - } - return dc1.Role < dc2.Role - }) -} - -// createLogicalPool -func createLogicalPool(dcs []*DeployConfig, logicalPool string) (LogicalPool, []Server) { - var zone string - copysets := 0 - servers := []Server{} - zones := DEFAULT_ZONES_PER_POOL - nextZone := genNextZone(zones) - physicalPool := logicalPool - kind := dcs[0].Kind - // !important - SortDeployConfigs(dcs) - - for _, 
dc := range dcs { - if dc.ReplicasSequence == 0 || dc.StandAlone { - zone = nextZone() - logger.Info("stand-alonedeployment? ", dc.StandAlone) - } - - // NOTE: if we deploy chunkservers with replica feature - // and the value of replica greater than 1, we should - // set internal port and external port to 0 for let MDS - // attribute them as services on the same machine. - // see issue: https://github.com/opencurve/curve/issues/1441 - internalPort := dc.Port - externalPort := dc.Port - if dc.Replicas > 1 && !dc.StandAlone { - internalPort = 0 - externalPort = 0 - } - - server := Server{ - Name: formatName(dc), - InternalIp: dc.NodeIP, - InternalPort: internalPort, - ExternalIp: dc.NodeIP, - ExternalPort: externalPort, - Zone: zone, - } - if kind == config.KIND_CURVEBS { - server.PhysicalPool = physicalPool - } else { - server.Pool = logicalPool - } - - // copysets number ddefault value is 100 - copysets += dc.Copysets - servers = append(servers, server) - } - - // copysets - copysets = copysets / DEFAULT_REPLICAS_PER_COPYSET - if copysets == 0 { - copysets = 1 - } - - // logical pool field in topology.json file - lpool := LogicalPool{ - Name: logicalPool, - Copysets: copysets, - Zones: zones, - Replicas: DEFAULT_REPLICAS_PER_COPYSET, - } - if kind == config.KIND_CURVEBS { - lpool.ScatterWidth = DEFAULT_SCATTER_WIDTH - lpool.Type = DEFAULT_TYPE - lpool.PhysicalPool = physicalPool - } - return lpool, servers -} - -func genClusterPool(dcs []*DeployConfig) string { - // create CurveClusterTopo object by call createLogicalPool - lpool, servers := createLogicalPool(dcs, "pool1") - topo := CurveClusterTopo{Servers: servers, NPools: 1} - - if dcs[0].Kind == config.KIND_CURVEBS { - topo.LogicalPools = []LogicalPool{lpool} - } else { - topo.Pools = []LogicalPool{lpool} - } - - // generate the topology.json - var bytes []byte - bytes, err := json.Marshal(topo) - if err != nil { - return "" - } - clusterPoolJson := string(bytes) - // for debug - logger.Info(clusterPoolJson) - 
- return clusterPoolJson -} diff --git a/pkg/topology/register.go b/pkg/topology/register.go deleted file mode 100644 index 4a5eb9ff..00000000 --- a/pkg/topology/register.go +++ /dev/null @@ -1,172 +0,0 @@ -package topology - -import ( - "fmt" - "path" - - "github.com/coreos/pkg/capnslog" - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - "github.com/pkg/errors" - batch "k8s.io/api/batch/v1" - v1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - PYHSICAL_POOL = "physical_pool" - LOGICAL_POOL = "logical_pool" - JOB_PYHSICAL_POOL = "provision-physical-pool" - JOB_LOGICAL_POOL = "provision-logical-pool" -) - -var logger = capnslog.NewPackageLogger("github.com/opencurve/curve-operator", "topology") - -// RunCreatePoolJob create Job to register topology.json -func RunCreatePoolJob(c *daemon.Cluster, dcs []*DeployConfig, poolType string) (*batch.Job, error) { - job := &batch.Job{} - if poolType == PYHSICAL_POOL { - job, _ = makeGenPoolJob(c, poolType, "provision-physical-pool") - } else if poolType == LOGICAL_POOL { - job, _ = makeGenPoolJob(c, poolType, "provision-logical-pool") - } - - existingJob, err := c.Context.Clientset.BatchV1().Jobs(job.Namespace).Get(job.Name, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - logger.Warningf("failed to detect job %s. %+v", job.Name, err) - } else if err == nil { - if existingJob.Status.Active > 0 { - logger.Infof("Found previous job %s. 
Status=%+v", job.Name, existingJob.Status) - return existingJob, nil - } - } - - _, err = c.Context.Clientset.BatchV1().Jobs(job.Namespace).Create(job) - logger.Infof("job created to generate %s", poolType) - - return &batch.Job{}, err -} - -func makeGenPoolJob(c *daemon.Cluster, poolType string, jobName string) (*batch.Job, error) { - // topology.json and tools.conf volume and volumemount - volumes, mounts := CreateTopoAndToolVolumeAndMount(c) - podSpec := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Labels: getRegisterJobLabel(poolType), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - makeCreatePoolContainer(c, poolType, mounts), - }, - RestartPolicy: v1.RestartPolicyOnFailure, - HostNetwork: true, - DNSPolicy: v1.DNSClusterFirstWithHostNet, - Volumes: volumes, - }, - } - - job := &batch.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: c.Namespace, - Labels: getRegisterJobLabel(poolType), - }, - Spec: batch.JobSpec{ - Template: podSpec, - }, - } - - // set ownerReference - err := c.OwnerInfo.SetControllerReference(job) - if err != nil { - return nil, errors.Wrapf(err, "failed to set owner reference to mon deployment %q", job.Name) - } - - return job, nil -} - -func makeCreatePoolContainer(c *daemon.Cluster, poolType string, mounts []v1.VolumeMount) v1.Container { - privileged := true - runAsUser := int64(0) - runAsNonRoot := false - readOnlyRootFilesystem := false - - toolsBinaryPath := "" - args := []string{} - if c.Kind == config.KIND_CURVEBS { - toolsBinaryPath = "/curvebs/tools/sbin/curvebs-tool" - argsOp := "" - if poolType == PYHSICAL_POOL { - argsOp = fmt.Sprintf("-op=%s", "create_physicalpool") - } else if poolType == LOGICAL_POOL { - argsOp = fmt.Sprintf("-op=%s", "create_logicalpool") - } - args = append(args, argsOp) - - clusterMapPath := path.Join(config.TopoJsonConfigmapMountPathDir, config.TopoJsonConfigmapDataKey) - argsClusterMap := fmt.Sprintf("-cluster_map=%s", clusterMapPath) - args = 
append(args, argsClusterMap) - } else { - toolsBinaryPath = "/curvefs/tools/sbin/curvefs_tool" - args = append(args, "create-topology") - } - - container := v1.Container{ - Name: "pool", - Args: args, - Command: []string{ - toolsBinaryPath, - }, - Image: c.CurveVersion.Image, - ImagePullPolicy: c.CurveVersion.ImagePullPolicy, - VolumeMounts: mounts, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - RunAsUser: &runAsUser, - RunAsNonRoot: &runAsNonRoot, - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, - }, - } - - return container -} - -// CreateTopoConfigMap create topology configmap -func CreateTopoConfigMap(c *daemon.Cluster, dcs []*DeployConfig) error { - // get topology.json string - if len(dcs) == 0 { - return errors.New("deployconfigs length is 0 tp create cluster pool") - } - clusterPoolJson := genClusterPool(dcs) - topoConfigMap := map[string]string{ - config.TopoJsonConfigmapDataKey: clusterPoolJson, - } - - cm := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.TopoJsonConfigMapName, - Namespace: c.Namespace, - }, - Data: topoConfigMap, - } - - err := c.OwnerInfo.SetControllerReference(cm) - if err != nil { - return errors.Wrapf(err, "failed to set owner reference to topology.json configmap %q", config.TopoJsonConfigMapName) - } - - // Create topology-json-conf configmap in cluster - _, err = c.Context.Clientset.CoreV1().ConfigMaps(c.Namespace).Create(cm) - if err != nil && !kerrors.IsAlreadyExists(err) { - return err - } - return nil -} - -func getRegisterJobLabel(poolType string) map[string]string { - labels := make(map[string]string) - labels["pool"] = poolType - return labels -} diff --git a/pkg/topology/topology.go b/pkg/topology/topology.go new file mode 100644 index 00000000..46ed525a --- /dev/null +++ b/pkg/topology/topology.go @@ -0,0 +1,136 @@ +package topology + +import ( + "fmt" + "strconv" + + "github.com/opencurve/curve-operator/pkg/clusterd" + "github.com/opencurve/curve-operator/pkg/k8sutil" + 
"github.com/opencurve/curve-operator/pkg/utils" + + "github.com/pkg/errors" +) + +// ParseTopology parse topology according to BsCluster or FsCluster CR(yaml declaration) +func ParseTopology(cluster clusterd.Clusterer) ([]*DeployConfig, error) { + kind := cluster.GetKind() + roles := []string{} + if kind == clusterd.KIND_CURVEBS { + roles = append(roles, CURVEBS_ROLES...) + } else if kind == clusterd.KIND_CURVEFS { + roles = append(roles, CURVEFS_ROLES...) + } else { + return nil, errors.New("Unknown cluster kind") + } + + dcs := []*DeployConfig{} + for _, role := range roles { + for hostSequence, host := range cluster.GetNodes() { + instances := cluster.GetRoleInstances(role) + // deploy etcd || mds || snapshotclone service using first three nodes + if role == ROLE_ETCD || role == ROLE_MDS || role == ROLE_SNAPSHOTCLONE { + if hostSequence > 2 { + break + } + } + hostIp, err := k8sutil.GetNodeIpByName(host, cluster.GetContext().Clientset) + if err != nil { + return nil, err + } + for instancesSequence := 0; instancesSequence < instances; instancesSequence++ { + config := cluster.GetRoleConfigs(role) + // merge port config and global config to configs of each service + mergePortConfig(cluster, role, instancesSequence, config) + mergeGlobalConfig(cluster, role, instancesSequence, config) + dc, err := NewDeployConfig(kind, role, host, hostIp, instances, + instancesSequence, hostSequence, config) + if err != nil { + return nil, err + } + dcs = append(dcs, dc) + } + } + } + + for i, dc := range dcs { + if err := AddServiceVariables(dcs, i); err != nil { + return nil, err + } else if err = AddClusterVariables(dcs, i); err != nil { + return nil, err + } + // Add config to serviceConfig + dc.convert() + } + + return dcs, nil +} + +func NewDeployConfig(kind, role, host, hostIp string, + instances, instanceSequence, hostSequence int, + config map[string]string) (*DeployConfig, error) { + + for k, v := range config { + if strv, ok := utils.All2Str(v); ok { + config[k] = strv 
+ } else { + return nil, errors.New("Unsupport Configure value type") + } + } + + return &DeployConfig{ + kind: kind, + id: formatId(role, host, hostSequence, instanceSequence), + parentId: formatId(role, host, hostSequence, 0), + name: formatName(role, hostSequence, instanceSequence), + role: role, + host: host, + hostIp: hostIp, + hostSequence: hostSequence, + instances: instances, + instancesSequence: instanceSequence, + variables: NewVariables(), + config: config, + serviceConfig: map[string]string{}, + }, nil +} + +// mergePortConfig fills in the listen/client/dummy/external port of a service +// (offset by the instance sequence) whenever the topology did not specify one. +func mergePortConfig(cluster clusterd.Clusterer, role string, + instanceSequence int, configs map[string]string) { + if isEmptyString(configs[CONFIG_LISTEN_PORT.key]) { + configs[CONFIG_LISTEN_PORT.key] = strconv.Itoa(cluster.GetRolePort(role) + instanceSequence) + } + if isEmptyString(configs[CONFIG_LISTEN_CLIENT_PORT.key]) { + configs[CONFIG_LISTEN_CLIENT_PORT.key] = strconv.Itoa(cluster.GetRoleClientPort(role) + instanceSequence) + } + if isEmptyString(configs[CONFIG_LISTEN_DUMMY_PORT.key]) { + configs[CONFIG_LISTEN_DUMMY_PORT.key] = strconv.Itoa(cluster.GetRoleDummyPort(role) + instanceSequence) + } + if isEmptyString(configs[CONFIG_LISTEN_EXTERNAL_PORT.key]) { + configs[CONFIG_LISTEN_EXTERNAL_PORT.key] = strconv.Itoa(cluster.GetRoleExternalPort(role) + instanceSequence) + } +} + +// mergeGlobalConfig merges global config, such as +// ContainerImage, dataDir, logDir, Copysets etc. 
+func mergeGlobalConfig(cluster clusterd.Clusterer, role string, + instanceSequence int, configs map[string]string) { + if isEmptyString(configs[CONFIG_CONTAINER_IMAGE.key]) { + configs[CONFIG_CONTAINER_IMAGE.key] = cluster.GetContainerImage() + } + if isEmptyString(configs[CONFIG_COPYSETS.key]) { + configs[CONFIG_COPYSETS.key] = strconv.Itoa(cluster.GetCopysets()) + } + if isEmptyString(configs[CONFIG_DATA_DIR.key]) { + configs[CONFIG_DATA_DIR.key] = fmt.Sprint(trimString(cluster.GetDataDir()), "/", role, instanceSequence) + } else { + // NOTE(review): fixed — this branch previously read and wrote + // CONFIG_LOG_DIR, so a user-supplied DataDir never got its + // role/sequence suffix and LogDir was clobbered (then suffixed twice). + dataDir := configs[CONFIG_DATA_DIR.key] + configs[CONFIG_DATA_DIR.key] = fmt.Sprint(trimString(dataDir), "/", role, instanceSequence) + } + if isEmptyString(configs[CONFIG_LOG_DIR.key]) { + configs[CONFIG_LOG_DIR.key] = fmt.Sprint(trimString(cluster.GetLogDir()), "/", role, instanceSequence) + } else { + logDir := configs[CONFIG_LOG_DIR.key] + configs[CONFIG_LOG_DIR.key] = fmt.Sprint(trimString(logDir), "/", role, instanceSequence) + } +} diff --git a/pkg/topology/util.go b/pkg/topology/util.go new file mode 100644 index 00000000..d8bd2198 --- /dev/null +++ b/pkg/topology/util.go @@ -0,0 +1,36 @@ +package topology + +import ( + "fmt" + "strings" +) + +func Choose(ok bool, first, second string) string { + if ok { + return first + } + return second +} + +// helper function +// etcd_hostname_0_0 +func formatId(role, host string, hostSequence, instancesSequence int) string { + return fmt.Sprintf("%s_%s_%d_%d", role, host, hostSequence, instancesSequence) +} + +// etcd00 +func formatName(role string, hostSequence, instancesSequence int) string { + return fmt.Sprintf("%s%d%d", role, hostSequence, instancesSequence) +} + +// isEmptyString trim the left and right space of a string, if "" return true, else return false +func isEmptyString(s string) bool { + return len(strings.TrimSpace(s)) == 0 +} + +// trimString trim the left and right space and '/' right of string +func trimString(s string) string { + ret := strings.TrimSpace(s) + ret = 
strings.TrimSuffix(ret, "/") + return ret +} diff --git a/pkg/topology/variable.go b/pkg/topology/variable.go new file mode 100644 index 00000000..cfea1aad --- /dev/null +++ b/pkg/topology/variable.go @@ -0,0 +1,126 @@ +package topology + +import ( + "fmt" + "regexp" +) + +// Variable +const ( + REGEX_VARIABLE = `\${([^${}]+)}` // ${var_name} +) + +type Variable struct { + Name string + Description string + Value string + Resolved bool +} + +type Variables struct { + m map[string]*Variable + r *regexp.Regexp +} + +func NewVariables() *Variables { + return &Variables{ + m: map[string]*Variable{}, + } +} + +func (vars *Variables) Register(v Variable) error { + name := v.Name + if _, ok := vars.m[name]; ok { + return fmt.Errorf("variable '%s' duplicate define", name) + } + + vars.m[name] = &v + return nil +} + +func (vars *Variables) Get(name string) (string, error) { + v, ok := vars.m[name] + if !ok { + return "", fmt.Errorf("variable '%s' not found", name) + } else if !v.Resolved { + return "", fmt.Errorf("variable '%s' unresolved", name) + } + + return v.Value, nil +} + +func (vars *Variables) Set(name, value string) error { + v, ok := vars.m[name] + if !ok { + return fmt.Errorf("variable '%s' not found", name) + } + + v.Value = value + v.Resolved = true + return nil +} + +func (vars *Variables) resolve(name string, marked map[string]bool) (string, error) { + marked[name] = true + v, ok := vars.m[name] + if !ok { + return "", fmt.Errorf("variable '%s' not defined", name) + } else if v.Resolved { + return v.Value, nil + } + + matches := vars.r.FindAllStringSubmatch(v.Value, -1) + if len(matches) == 0 { // no variable + v.Resolved = true + return v.Value, nil + } + + // resolve all sub-variable + for _, mu := range matches { + name = mu[1] + if _, err := vars.resolve(name, marked); err != nil { + return "", err + } + } + + // ${var} + v.Value = vars.r.ReplaceAllStringFunc(v.Value, func(name string) string { + return vars.m[name[2:len(name)-1]].Value + }) + v.Resolved 
= true + return v.Value, nil +} + +func (vars *Variables) Build() error { + r, err := regexp.Compile(REGEX_VARIABLE) + if err != nil { + return err + } + + vars.r = r + for _, v := range vars.m { + marked := map[string]bool{} + if _, err := vars.resolve(v.Name, marked); err != nil { + return err + } + } + return nil +} + +// "hello, ${varname}" => "hello, world" +func (vars *Variables) Rendering(s string) (string, error) { + matches := vars.r.FindAllStringSubmatch(s, -1) + if len(matches) == 0 { // no variable + return s, nil + } + + var err error + value := vars.r.ReplaceAllStringFunc(s, func(name string) string { + val, e := vars.Get(name[2 : len(name)-1]) + if e != nil && err == nil { + err = e + } + return val + }) + return value, err +} diff --git a/pkg/topology/variables.go b/pkg/topology/variables.go new file mode 100644 index 00000000..4efea80f --- /dev/null +++ b/pkg/topology/variables.go @@ -0,0 +1,290 @@ +package topology + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/google/uuid" + "github.com/opencurve/curve-operator/pkg/utils" +) + +const ( + SELECT_LISTEN_PORT = iota + SELECT_LISTEN_CLIENT_PORT + SELECT_LISTEN_DUMMY_PORT + SELECT_LISTEN_PROXY_PORT +) + +type Var struct { + name string + kind []string // kind limit for register variable + role []string // role limit for register variable + lookup bool // whether need to lookup host + resolved bool +} + +/* + * built-in variables: + * + * service: + * ${prefix} "/curvebs/{etcd,mds,chunkserver}" + * ${service_id} "c690bde11d1a" + * ${service_role} "mds" + * ${service_host} "10.0.0.1" + * ${service_host_sequence} "1" + * ${service_instances_sequence} "1" + * ${format_instances_sequence} "01" + * ${service_addr} "10.0.0.1" + * ${service_port} "6666" + * ${service_client_port} "2379" (etcd) + * ${service_dummy_port} "6667" (snapshotclone/mds) + * ${service_proxy_port} "8080" (snapshotclone) + * ${service_external_addr} "10.0.10.1" (chunkserver/metaserver) + * 
${service_external_port} "7800" (metaserver) + * ${log_dir} "/data/logs" + * ${data_dir} "/data" + * ${random_uuid} "6fa8f01c411d7655d0354125c36847bb" + * + * cluster: + * ${cluster_etcd_http_addr} "etcd1=http://10.0.10.1:2380,etcd2=http://10.0.10.2:2380,etcd3=http://10.0.10.3:2380" + * ${cluster_etcd_addr} "10.0.10.1:2380,10.0.10.2:2380,10.0.10.3:2380" + * ${cluster_mds_addr} "10.0.10.1:6666,10.0.10.2:6666,10.0.10.3:6666" + * ${cluster_mds_dummy_addr} "10.0.10.1:6667,10.0.10.2:6667,10.0.10.3:6667" + * ${cluster_mds_dummy_port} "6667,6668,6669" + * ${cluster_chunkserver_addr} "10.0.10.1:6800,10.0.10.2:6800,10.0.10.3:6800" + * ${cluster_snapshotclone_addr} "10.0.10.1:5555,10.0.10.2:5555,10.0.10.3:5555" + * ${cluster_snapshotclone_proxy_addr} "10.0.10.1:8080,10.0.10.2:8080,10.0.10.3:8083" + * ${cluster_snapshotclone_dummy_port} "8081,8082,8083" + * ${cluster_snapshotclone_nginx_upstream} "server 10.0.0.1:5555; server 10.0.0.3:5555; server 10.0.0.3:5555;" + * ${cluster_metaserver_addr} "10.0.10.1:6701,10.0.10.2:6701,10.0.10.3:6701" + */ +var ( + serviceVars = []Var{ + {name: "prefix"}, + {name: "service_id"}, + {name: "service_role"}, + {name: "service_host", lookup: true}, + {name: "service_host_sequence"}, + {name: "service_replica_sequence"}, + {name: "service_replicas_sequence"}, + {name: "service_instances_sequence"}, + {name: "format_replica_sequence"}, + {name: "format_replicas_sequence"}, + {name: "format_instances_sequence"}, + {name: "service_addr", lookup: true}, + {name: "service_port"}, + {name: "service_client_port", role: []string{ROLE_ETCD}}, + {name: "service_dummy_port", role: []string{ROLE_SNAPSHOTCLONE, ROLE_MDS}}, + {name: "service_proxy_port", role: []string{ROLE_SNAPSHOTCLONE}}, + {name: "service_external_addr", role: []string{ROLE_CHUNKSERVER, ROLE_METASERVER}, lookup: true}, + {name: "service_external_port", role: []string{ROLE_METASERVER}}, + {name: "log_dir"}, + {name: "data_dir"}, + {name: "random_uuid"}, + } + + // NOTE: we don't support 
cluster variable exist in topology + clusterVars = []Var{ + {name: "cluster_etcd_http_addr"}, + {name: "cluster_etcd_addr"}, + {name: "cluster_mds_addr"}, + {name: "cluster_mds_dummy_addr"}, + {name: "cluster_mds_dummy_port"}, + {name: "cluster_chunkserver_addr", kind: []string{KIND_CURVEBS}}, + {name: "cluster_snapshotclone_addr"}, + {name: "cluster_snapshotclone_proxy_addr", kind: []string{KIND_CURVEBS}}, + {name: "cluster_snapshotclone_dummy_port"}, + {name: "cluster_snapshotclone_nginx_upstream", kind: []string{KIND_CURVEBS}}, + {name: "cluster_snapshot_addr"}, // tools-v2: compatible with some old version image + {name: "cluster_snapshot_dummy_addr"}, // tools-v2 + {name: "cluster_metaserver_addr", kind: []string{KIND_CURVEFS}}, + } +) + +func skip(dc *DeployConfig, v Var) bool { + role := dc.GetRole() + kind := dc.GetKind() + if len(v.kind) != 0 && !utils.Slice2Map(v.kind)[kind] { + return true + } else if len(v.role) != 0 && !utils.Slice2Map(v.role)[role] { + return true + } + + return false +} + +func addVariables(dcs []*DeployConfig, idx int, vars []Var) error { + dc := dcs[idx] + for _, v := range vars { + if skip(dc, v) { + continue + } + + err := dc.GetVariables().Register(Variable{ + Name: v.name, + Value: getValue(v.name, dcs, idx), + }) + if err != nil { + return errors.New("failed to register variable to deploy config") + } + } + + return nil +} + +func AddServiceVariables(dcs []*DeployConfig, idx int) error { + return addVariables(dcs, idx, serviceVars) +} + +func AddClusterVariables(dcs []*DeployConfig, idx int) error { + return addVariables(dcs, idx, clusterVars) +} + +/* + * interface for get variable value + */ +func joinEtcdPeer(dcs []*DeployConfig) string { + peers := []string{} + for _, dc := range dcs { + if dc.GetRole() != ROLE_ETCD { + continue + } + + hostSequence := dc.GetHostSequence() + instanceSquence := dc.GetInstancesSequence() + peerHost := dc.GetHostIp() + peerPort := dc.GetListenPort() + peer := 
fmt.Sprintf("etcd%d%d=http://%s:%d", hostSequence, instanceSquence, peerHost, peerPort) + peers = append(peers, peer) + } + return strings.Join(peers, ",") +} + +func joinPeer(dcs []*DeployConfig, selectRole string, selectPort int) string { + peers := []string{} + for _, dc := range dcs { + if dc.GetRole() != selectRole { + continue + } + + peerHost := dc.GetHostIp() + peerPort := dc.GetListenPort() + switch selectPort { + case SELECT_LISTEN_CLIENT_PORT: + peerPort = dc.GetListenClientPort() + case SELECT_LISTEN_DUMMY_PORT: + peerPort = dc.GetListenDummyPort() + case SELECT_LISTEN_PROXY_PORT: + peerPort = dc.GetListenProxyPort() + } + peer := fmt.Sprintf("%s:%d", peerHost, peerPort) + peers = append(peers, peer) + } + return strings.Join(peers, ",") +} + +func joinDummyPort(dcs []*DeployConfig, selectRole string) string { + ports := []string{} + for _, dc := range dcs { + if dc.GetRole() != selectRole { + continue + } + ports = append(ports, strconv.Itoa(dc.GetListenDummyPort())) + } + return strings.Join(ports, ",") +} + +func joinNginxUpstreamServer(dcs []*DeployConfig) string { + servers := []string{} + for _, dc := range dcs { + if dc.GetRole() != ROLE_SNAPSHOTCLONE { + continue + } + peerHost := dc.GetHostIp() + peerPort := dc.GetListenPort() + server := fmt.Sprintf("server %s:%d;", peerHost, peerPort) + servers = append(servers, server) + } + return strings.Join(servers, " ") +} + +func getValue(name string, dcs []*DeployConfig, idx int) string { + dc := dcs[idx] + switch name { + case "prefix": + return dc.GetProjectLayout().ServiceRootDir + case "service_id": + return dc.GetId() + case "service_role": + return dc.GetRole() + case "service_host": + return dc.GetHost() + case "service_host_sequence": + return strconv.Itoa(dc.GetHostSequence()) + case "service_replica_sequence": + return strconv.Itoa(dc.GetInstancesSequence()) + case "service_replicas_sequence": + return strconv.Itoa(dc.GetInstancesSequence()) + case "service_instances_sequence": + return 
strconv.Itoa(dc.GetInstancesSequence()) + case "format_replica_sequence": + return fmt.Sprintf("%02d", dc.GetInstancesSequence()) + case "format_replicas_sequence": + return fmt.Sprintf("%02d", dc.GetInstancesSequence()) + case "format_instances_sequence": + return fmt.Sprintf("%02d", dc.GetInstancesSequence()) + case "service_addr": + return dc.GetHostIp() + case "service_port": + return utils.Atoa(dc.GetListenPort()) + case "service_client_port": // etcd + return utils.Atoa(dc.GetListenClientPort()) + case "service_dummy_port": // mds, snapshotclone + return utils.Atoa(dc.GetListenDummyPort()) + case "service_proxy_port": // snapshotclone + return utils.Atoa(dc.GetListenProxyPort()) + case "service_external_addr": // chunkserver, metaserver + return utils.Atoa(dc.GetListenExternalIp()) + case "service_external_port": // metaserver + if utils.Atoa(dc.GetEnableExternalServer()) == "true" { + return utils.Atoa(dc.GetListenExternalPort()) + } + return utils.Atoa(dc.GetListenPort()) + case "log_dir": + return dc.GetLogDir() + case "data_dir": + return dc.GetDataDir() + case "random_uuid": + return uuid.New().String() + case "cluster_etcd_http_addr": + return joinEtcdPeer(dcs) + case "cluster_etcd_addr": + return joinPeer(dcs, ROLE_ETCD, SELECT_LISTEN_CLIENT_PORT) + case "cluster_mds_addr": + return joinPeer(dcs, ROLE_MDS, SELECT_LISTEN_PORT) + case "cluster_mds_dummy_addr": + return joinPeer(dcs, ROLE_MDS, SELECT_LISTEN_DUMMY_PORT) + case "cluster_mds_dummy_port": + return joinDummyPort(dcs, ROLE_MDS) + case "cluster_chunkserver_addr": + return joinPeer(dcs, ROLE_CHUNKSERVER, SELECT_LISTEN_PORT) + case "cluster_snapshotclone_addr": + return joinPeer(dcs, ROLE_SNAPSHOTCLONE, SELECT_LISTEN_PORT) + case "cluster_snapshotclone_proxy_addr": + return joinPeer(dcs, ROLE_SNAPSHOTCLONE, SELECT_LISTEN_PROXY_PORT) + case "cluster_snapshotclone_dummy_port": + return joinDummyPort(dcs, ROLE_SNAPSHOTCLONE) + case "cluster_snapshotclone_nginx_upstream": + return 
joinNginxUpstreamServer(dcs) + case "cluster_snapshot_addr": + return joinPeer(dcs, ROLE_SNAPSHOTCLONE, SELECT_LISTEN_PORT) + case "cluster_snapshot_dummy_addr": + return joinPeer(dcs, ROLE_SNAPSHOTCLONE, SELECT_LISTEN_DUMMY_PORT) + case "cluster_metaserver_addr": + return joinPeer(dcs, ROLE_METASERVER, SELECT_LISTEN_PORT) + } + + return "" +} diff --git a/pkg/topology/volume.go b/pkg/topology/volume.go deleted file mode 100644 index 14ca3ff3..00000000 --- a/pkg/topology/volume.go +++ /dev/null @@ -1,81 +0,0 @@ -package topology - -import ( - "github.com/opencurve/curve-operator/pkg/config" - "github.com/opencurve/curve-operator/pkg/daemon" - v1 "k8s.io/api/core/v1" -) - -// CreateTopoAndToolVolumeAndMount creates volumes and volumeMounts for topo and tool -func CreateTopoAndToolVolumeAndMount(c *daemon.Cluster) ([]v1.Volume, []v1.VolumeMount) { - vols := []v1.Volume{} - mounts := []v1.VolumeMount{} - - var topoMountPath, toolMountPath string - if c.Kind == config.KIND_CURVEBS { - topoMountPath = config.TopoJsonConfigmapMountPathDir - toolMountPath = config.ToolsConfigMapMountPathDir - } else { - topoMountPath = config.FSTopoJsonConfigmapMountPathDir - toolMountPath = config.FSToolsConfigMapMountPathDir - } - - // Create topology configmap volume and volume mount("/curvebs/tools/conf/topology.json") - mode := int32(0644) - topoConfigMapVolSource := &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: config.TopoJsonConfigMapName, - }, - Items: []v1.KeyToPath{ - { - Key: config.TopoJsonConfigmapDataKey, - Path: config.TopoJsonConfigmapDataKey, - Mode: &mode, - }, - }, - } - topoConfigVol := v1.Volume{ - Name: config.TopoJsonConfigMapName, - VolumeSource: v1.VolumeSource{ - ConfigMap: topoConfigMapVolSource, - }, - } - vols = append(vols, topoConfigVol) - - topoMount := v1.VolumeMount{ - Name: config.TopoJsonConfigMapName, - ReadOnly: true, // should be no reason to write to the config in pods, so enforce this - MountPath: 
const (
	// AFTER_MUTATE_CONF marks configuration handled after the mutate
	// step of config rendering.
	AFTER_MUTATE_CONF = "after-mutate-conf"
)

// Choose returns first when ok is true, otherwise second.
func Choose(ok bool, first, second string) string {
	if ok {
		return first
	}
	return second
}

// Slice2Map converts a string slice into a set-like map: every item of
// s maps to true. Duplicate items collapse into a single key.
func Slice2Map(s []string) map[string]bool {
	m := make(map[string]bool, len(s))
	for _, item := range s {
		m[item] = true
	}
	return m
}

// Type reports a short name for the dynamic type of v. Types outside
// the supported set yield "unknown".
func Type(v interface{}) string {
	switch v.(type) {
	case bool:
		return "bool"
	case string:
		return "string"
	case int:
		return "int"
	case int64:
		return "int64"
	case map[string]interface{}:
		return "string_interface_map"
	default:
		return "unknown"
	}
}

// IsBool reports whether v holds a bool.
func IsBool(v interface{}) bool {
	return Type(v) == "bool"
}

// IsString reports whether v holds a string.
func IsString(v interface{}) bool {
	return Type(v) == "string"
}

// IsInt reports whether v holds an int.
func IsInt(v interface{}) bool {
	return Type(v) == "int"
}

// IsInt64 reports whether v holds an int64.
func IsInt64(v interface{}) bool {
	return Type(v) == "int64"
}

// IsStringAnyMap reports whether v holds a map[string]interface{}.
func IsStringAnyMap(v interface{}) bool {
	return Type(v) == "string_interface_map"
}

// IsFunc reports whether v is a function value.
// A nil interface returns false: reflect.TypeOf(nil) yields a nil
// reflect.Type, and calling Kind() on it would panic.
func IsFunc(v interface{}) bool {
	if v == nil {
		return false
	}
	return reflect.TypeOf(v).Kind() == reflect.Func
}

// All2Str converts a bool, string, int or int64 to its string form;
// ok is false for any other type (and value is then ""). int64 support
// matches Type/IsInt64, which already recognize it.
func All2Str(v interface{}) (value string, ok bool) {
	switch x := v.(type) {
	case string:
		return x, true
	case int:
		return strconv.Itoa(x), true
	case int64:
		return strconv.FormatInt(x, 10), true
	case bool:
		return strconv.FormatBool(x), true
	default:
		return "", false
	}
}

// Atoa converts any supported value to a string, returning "" for
// unsupported types (conversion errors are deliberately ignored).
func Atoa(v interface{}) string {
	value, _ := All2Str(v)
	return value
}

// Str2Int parses s as a base-10 int; the bool reports success.
func Str2Int(s string) (int, bool) {
	v, err := strconv.Atoi(s)
	return v, err == nil
}

// Str2Bool parses s as a bool (per strconv.ParseBool); the second
// result reports whether parsing succeeded.
func Str2Bool(s string) (bool, bool) { // value, ok
	v, err := strconv.ParseBool(s)
	return v, err == nil
}

// IsTrueStr reports whether s parses as the boolean value true.
func IsTrueStr(s string) bool {
	v, ok := Str2Bool(s)
	return ok && v
}