diff --git a/controller/backing_image_controller.go b/controller/backing_image_controller.go
index b73ce6a959..ee6e4e2307 100644
--- a/controller/backing_image_controller.go
+++ b/controller/backing_image_controller.go
@@ -110,6 +110,13 @@ func NewBackingImageController(
 	}
 	bic.cacheSyncs = append(bic.cacheSyncs, ds.ReplicaInformer.HasSynced)
 
+	if _, err = ds.NodeInformer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
+		UpdateFunc: bic.enqueueBackingImageForNodeUpdate,
+	}, 0); err != nil {
+		return nil, err
+	}
+	bic.cacheSyncs = append(bic.cacheSyncs, ds.NodeInformer.HasSynced)
+
 	return bic, nil
 }
 
@@ -254,6 +261,12 @@ func (bic *BackingImageController) syncBackingImage(key string) (err error) {
 		if err != nil {
 			return
 		}
+		if !reflect.DeepEqual(existingBackingImage.Spec, backingImage.Spec) {
+			if _, err := bic.ds.UpdateBackingImage(backingImage); err != nil && apierrors.IsConflict(errors.Cause(err)) {
+				log.WithError(err).Debugf("Requeue %v due to conflict", key)
+				bic.enqueueBackingImage(backingImage)
+			}
+		}
 		if reflect.DeepEqual(existingBackingImage.Status, backingImage.Status) {
 			return
 		}
@@ -291,9 +304,123 @@ func (bic *BackingImageController) syncBackingImage(key string) (err error) {
 		return err
 	}
 
+	if err := bic.replenishBackingImageCopies(backingImage); err != nil {
+		return err
+	}
+
+	bic.cleanupEvictionRequestedBackingImages(backingImage)
+
+	return nil
+}
+
+func (bic *BackingImageController) replenishBackingImageCopies(bi *longhorn.BackingImage) (err error) {
+	bids, err := bic.ds.GetBackingImageDataSource(bi.Name)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+		return errors.Wrap(err, "failed to get the backing image data source")
+	}
+	// Only maintain the minimum number of copies after the BackingImage has been transferred to the BackingImageManager.
+	if !bids.Spec.FileTransferred {
+		return nil
+	}
+
+	logrus.Debugf("Replenishing copies for backing image %v", bi.Name)
+
+	nonFailedCopies := 0
+	usedDisks := map[string]bool{}
+	for diskUUID := range bi.Spec.Disks {
+		fileStatus, exists := bi.Status.DiskFileStatusMap[diskUUID]
+		if !exists || (fileStatus.State != longhorn.BackingImageStateFailed &&
+			fileStatus.State != longhorn.BackingImageStateFailedAndCleanUp &&
+			fileStatus.State != longhorn.BackingImageStateUnknown) {
+
+			// A file missing from the status may simply not have been synced from the backing image manager yet.
+			// Consider it a newly created copy and count it as non-failed,
+			// so that we don't create an extra copy when handling copy evictions.
+			usedDisks[diskUUID] = true
+			nonFailedCopies += 1
+		}
+	}
+	logrus.Debugf("Backing image %v currently has %v non-failed copies", bi.Name, nonFailedCopies)
+
+	if nonFailedCopies == 0 {
+		return nil
+	} else if nonFailedCopies >= bi.Spec.MinNumberOfCopies {
+		if err := bic.handleBackingImageCopiesEvictions(nonFailedCopies, bi, usedDisks); err != nil {
+			return err
+		}
+	} else { // nonFailedCopies < bi.Spec.MinNumberOfCopies
+		readyNode, readyDiskName, err := bic.ds.GetReadyNodeDiskForBackingImage(bi, usedDisks)
+		if err != nil {
+			logrus.WithError(err).Warn("Failed to find a ready node and disk to replenish the backing image copy")
+			return nil
+		}
+		logrus.Infof("Replenishing backing image %v to node %v disk %v", bi.Name, readyNode.Name, readyDiskName)
+		// BackingImageManager will then sync the BackingImage to the disk
+		bi.Spec.Disks[readyNode.Status.DiskStatus[readyDiskName].DiskUUID] = ""
+	}
+
+	return nil
+}
+
+// handleBackingImageCopiesEvictions creates one more backing image copy when eviction is requested and the non-evicting copies would drop below the minimum.
+func (bic *BackingImageController) handleBackingImageCopiesEvictions(nonFailedCopies int, bi *longhorn.BackingImage, usedDisks map[string]bool) (err error) {
+	log := getLoggerForBackingImage(bic.logger, bi)
+	nonEvictingCount := nonFailedCopies
+
+	for _, fileStatus := range bi.Status.DiskFileStatusMap {
+		if fileStatus.EvictionRequested {
+			nonEvictingCount--
+		}
+	}
+
+	if nonEvictingCount < bi.Spec.MinNumberOfCopies {
+		log.Info("Creating one more backing image copy for eviction")
+		readyNode, readyDiskName, err := bic.ds.GetReadyNodeDiskForBackingImage(bi, usedDisks)
+		if err != nil {
+			log.WithError(err).Warn("Failed to find a ready node and disk for the new backing image copy")
+			return nil
+		}
+		// BackingImageManager will then sync the BackingImage to the disk
+		bi.Spec.Disks[readyNode.Status.DiskStatus[readyDiskName].DiskUUID] = ""
+	}
+
 	return nil
 }
 
+func (bic *BackingImageController) cleanupEvictionRequestedBackingImages(bi *longhorn.BackingImage) {
+	log := getLoggerForBackingImage(bic.logger, bi)
+
+	// If there is no non-evicting healthy backing image copy,
+	// Longhorn should retain one evicting healthy copy for replenishing.
+	hasNonEvictingHealthyBackingImageCopy := false
+	evictingHealthyBackingImageCopyDiskUUID := ""
+	for diskUUID, fileStatus := range bi.Status.DiskFileStatusMap {
+		if fileStatus.State != longhorn.BackingImageStateReady {
+			continue
+		}
+		if !fileStatus.EvictionRequested {
+			hasNonEvictingHealthyBackingImageCopy = true
+			break
+		}
+		evictingHealthyBackingImageCopyDiskUUID = diskUUID
+	}
+
+	for diskUUID, fileStatus := range bi.Status.DiskFileStatusMap {
+		if !fileStatus.EvictionRequested {
+			continue
+		}
+		if !hasNonEvictingHealthyBackingImageCopy && diskUUID == evictingHealthyBackingImageCopyDiskUUID {
+			log.Warnf("Cannot evict the backing image copy on disk %v for now since there is no other healthy copy", diskUUID)
+			continue
+		}
+		delete(bi.Spec.Disks, diskUUID)
+		log.Infof("Evicting backing image copy on disk %v", diskUUID)
+	}
+}
+
 func (bic *BackingImageController) IsBackingImageDataSourceCleaned(bi *longhorn.BackingImage) (cleaned bool, err error) {
 	bids, err := bic.ds.GetBackingImageDataSource(bi.Name)
 	if err != nil {
@@ -396,8 +523,10 @@ func (bic *BackingImageController) handleBackingImageDataSource(bi *longhorn.Bac
 			}
 		}
 	}
+
+	// Choose a ready node and disk for the backing image data source.
 	if !foundReadyDisk {
-		readyNode, readyDiskName, err := bic.ds.GetRandomReadyNodeDisk()
+		readyNode, readyDiskName, err := bic.ds.GetReadyNodeDiskForBackingImage(bi, map[string]bool{})
 		if err != nil {
 			return err
 		}
@@ -528,7 +657,7 @@ func (bic *BackingImageController) handleBackingImageDataSource(bi *longhorn.Bac
 	changeNodeDisk := err != nil || node.Name != bids.Spec.NodeID || node.Spec.Disks[diskName].Path != bids.Spec.DiskPath || node.Status.DiskStatus[diskName].DiskUUID != bids.Spec.DiskUUID
 	if changeNodeDisk {
 		log.Warn("Backing image data source current node and disk is not ready, need to switch to another ready node and disk")
-		readyNode, readyDiskName, err := bic.ds.GetRandomReadyNodeDisk()
+		readyNode, readyDiskName, err := bic.ds.GetReadyNodeDiskForBackingImage(bi, map[string]bool{})
 		if err != nil {
 			return err
 		}
@@ -827,6 +956,60 @@ func (bic *BackingImageController) enqueueBackingImageForBackingImageDataSource(
 	bic.enqueueBackingImage(obj)
 }
 
+func (bic *BackingImageController) enqueueBackingImageForNodeUpdate(oldObj, currObj interface{}) {
+	oldNode, ok := oldObj.(*longhorn.Node)
+	if !ok {
+		deletedState, ok := oldObj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("received unexpected obj: %#v", oldObj))
+			return
+		}
+
+		// use the last known state to enqueue the dependent objects
+		oldNode, ok = deletedState.Obj.(*longhorn.Node)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("DeletedFinalStateUnknown contained invalid object: %#v", deletedState.Obj))
+			return
+		}
+	}
+
+	currNode, ok := currObj.(*longhorn.Node)
+	if !ok {
+		deletedState, ok := currObj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("received unexpected obj: %#v", currObj))
+			return
+		}
+
+		// use the last known state to enqueue the dependent objects
+		currNode, ok = deletedState.Obj.(*longhorn.Node)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("DeletedFinalStateUnknown contained invalid object: %#v", deletedState.Obj))
+			return
+		}
+	}
+
+	diskBackingImageMap, err := bic.ds.GetDiskBackingImageMap(oldNode)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("failed to get disk backing image map when handling node update"))
+		return
+	}
+
+	// If a node or disk changes its EvictionRequested setting, enqueue all backing image copies on that node/disk.
+	evictionRequestedChangeOnNodeLevel := currNode.Spec.EvictionRequested != oldNode.Spec.EvictionRequested
+	for diskName, newDiskSpec := range currNode.Spec.Disks {
+		oldDiskSpec, ok := oldNode.Spec.Disks[diskName]
+		evictionRequestedChangeOnDiskLevel := !ok || (newDiskSpec.EvictionRequested != oldDiskSpec.EvictionRequested)
+		if diskStatus, existed := currNode.Status.DiskStatus[diskName]; existed && (evictionRequestedChangeOnNodeLevel || evictionRequestedChangeOnDiskLevel) {
+			diskUUID := diskStatus.DiskUUID
+			for _, backingImage := range diskBackingImageMap[diskUUID] {
+				bic.enqueueBackingImage(backingImage)
+			}
+		}
+	}
+}
+
 func (bic *BackingImageController) enqueueBackingImageForReplica(obj interface{}) {
 	replica, isReplica := obj.(*longhorn.Replica)
 	if !isReplica {
diff --git a/controller/node_controller.go b/controller/node_controller.go
index 8c70389c55..290f0d9445 100644
--- a/controller/node_controller.go
+++ b/controller/node_controller.go
@@ -577,6 +577,10 @@ func (nc *NodeController) syncNode(key string) (err error) {
 		return err
 	}
 
+	if err := nc.syncBackingImageEvictionRequested(node); err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -1187,7 +1191,7 @@ func (nc *NodeController) cleanUpBackingImagesInDisks(node *longhorn.Node) error
 			continue
 		}
 		existingBackingImage := bi.DeepCopy()
-		BackingImageDiskFileCleanup(node, bi, bids, waitInterval, 1)
+		BackingImageDiskFileCleanup(node, bi, bids, waitInterval, bi.Spec.MinNumberOfCopies)
 		if !reflect.DeepEqual(existingBackingImage.Spec, bi.Spec) {
 			if _, err := nc.ds.UpdateBackingImage(bi); err != nil {
 				log.WithError(err).Warn("Failed to update backing image when cleaning up the images in disks")
@@ -1201,13 +1205,13 @@ func (nc *NodeController) cleanUpBackingImagesInDisks(node *longhorn.Node) error
 	return nil
 }
 
-func BackingImageDiskFileCleanup(node *longhorn.Node, bi *longhorn.BackingImage, bids *longhorn.BackingImageDataSource, waitInterval time.Duration, haRequirement int) {
+func BackingImageDiskFileCleanup(node *longhorn.Node, bi *longhorn.BackingImage, bids *longhorn.BackingImageDataSource, waitInterval time.Duration, minNumberOfCopies int) {
 	if bi.Spec.Disks == nil || bi.Status.DiskLastRefAtMap == nil {
 		return
 	}
 
-	if haRequirement < 1 {
-		haRequirement = 1
+	if minNumberOfCopies < 1 {
+		minNumberOfCopies = 1
 	}
 
 	var readyDiskFileCount, handlingDiskFileCount, failedDiskFileCount int
@@ -1263,17 +1267,17 @@ func BackingImageDiskFileCleanup(node *longhorn.Node, bi *longhorn.BackingImage,
 		}
 		switch fileStatus.State {
 		case longhorn.BackingImageStateFailed:
-			if haRequirement >= readyDiskFileCount+handlingDiskFileCount+failedDiskFileCount {
+			if minNumberOfCopies >= readyDiskFileCount+handlingDiskFileCount+failedDiskFileCount {
 				continue
 			}
 			failedDiskFileCount--
 		case longhorn.BackingImageStateReadyForTransfer, longhorn.BackingImageStateReady:
-			if haRequirement >= readyDiskFileCount {
+			if minNumberOfCopies >= readyDiskFileCount {
 				continue
 			}
 			readyDiskFileCount--
 		default:
-			if haRequirement >= readyDiskFileCount+handlingDiskFileCount {
+			if minNumberOfCopies >= readyDiskFileCount+handlingDiskFileCount {
 				continue
 			}
 			handlingDiskFileCount--
@@ -1584,6 +1588,70 @@ func (nc *NodeController) createSnapshotMonitor() (mon monitor.Monitor, err erro
 	return mon, nil
 }
 
+func (nc *NodeController) syncBackingImageEvictionRequested(node *longhorn.Node) error {
+	// Avoid periodically listing all backing images when no eviction is requested on this node or its disks.
+	if !isNodeOrDisksEvictionRequested(node) {
+		return nil
+	}
+	log := getLoggerForNode(nc.logger, node)
+
+	diskBackingImageMap, err := nc.ds.GetDiskBackingImageMap(node)
+	if err != nil {
+		return err
+	}
+
+	type backingImageToSync struct {
+		*longhorn.BackingImage
+		diskUUID string
+		evict    bool
+	}
+	backingImagesToSync := []backingImageToSync{}
+
+	for diskName, diskSpec := range node.Spec.Disks {
+		diskStatus := node.Status.DiskStatus[diskName]
+		diskUUID := diskStatus.DiskUUID
+
+		if diskSpec.EvictionRequested || node.Spec.EvictionRequested {
+			for _, backingImage := range diskBackingImageMap[diskUUID] {
+				// trigger the eviction request
+				backingImage.Status.DiskFileStatusMap[diskUUID].EvictionRequested = true
+				backingImagesToSync = append(backingImagesToSync, backingImageToSync{backingImage, diskUUID, true})
+			}
+		} else {
+			for _, backingImage := range diskBackingImageMap[diskUUID] {
+				if backingImage.Status.DiskFileStatusMap[diskUUID].EvictionRequested {
+					// if it was previously set to true, cancel the eviction request
+					backingImage.Status.DiskFileStatusMap[diskUUID].EvictionRequested = false
+					backingImagesToSync = append(backingImagesToSync, backingImageToSync{backingImage, diskUUID, false})
+				}
+			}
+		}
+	}
+
+	for _, backingImageToSync := range backingImagesToSync {
+		backingImageLog := log.WithField("backingimage", backingImageToSync.Name).WithField("disk", backingImageToSync.diskUUID)
+		if backingImageToSync.evict {
+			backingImageLog.Info("Requesting backing image copy eviction")
+			if _, err := nc.ds.UpdateBackingImageStatus(backingImageToSync.BackingImage); err != nil {
+				backingImageLog.Warn("Failed to request backing image copy eviction, will enqueue then resync the node")
+				nc.enqueueNodeRateLimited(node)
+				continue
+			}
+			nc.eventRecorder.Eventf(backingImageToSync.BackingImage, corev1.EventTypeNormal, constant.EventReasonEvictionUserRequested, "Requesting backing image %v eviction from node %v and disk %v", backingImageToSync.Name, node.Spec.Name, backingImageToSync.diskUUID)
+		} else {
+			backingImageLog.Info("Canceling backing image copy eviction")
+			if _, err := nc.ds.UpdateBackingImageStatus(backingImageToSync.BackingImage); err != nil {
+				backingImageLog.Warn("Failed to cancel backing image copy eviction, will enqueue then resync the node")
+				nc.enqueueNodeRateLimited(node)
+				continue
+			}
+			nc.eventRecorder.Eventf(backingImageToSync.BackingImage, corev1.EventTypeNormal, constant.EventReasonEvictionCanceled, "Canceling backing image %v eviction from node %v and disk %v", backingImageToSync.Name, node.Spec.Name, backingImageToSync.diskUUID)
+		}
+	}
+
+	return nil
+}
+
 func (nc *NodeController) syncReplicaEvictionRequested(node *longhorn.Node, kubeNode *corev1.Node) error {
 	log := getLoggerForNode(nc.logger, node)
 	node.Status.AutoEvicting = false
@@ -1690,3 +1758,17 @@ func (nc *NodeController) shouldEvictReplica(node *longhorn.Node, kubeNode *core
 
 	return false, constant.EventReasonEvictionCanceled, nil
 }
+
+func isNodeOrDisksEvictionRequested(node *longhorn.Node) bool {
+	if node.Spec.EvictionRequested {
+		return true
+	}
+
+	for _, diskSpec := range node.Spec.Disks {
+		if diskSpec.EvictionRequested {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/datastore/longhorn.go b/datastore/longhorn.go
index fec7944db9..e30ea5483e 100644
--- a/datastore/longhorn.go
+++ b/datastore/longhorn.go
@@ -2641,18 +2641,32 @@ func (s *DataStore) ListReadyNodesContainingEngineImageRO(image string) (map[str
 	return readyNodes, nil
 }
 
-// GetRandomReadyNodeDisk a list of all Node the in the given namespace and
+// GetReadyNodeDiskForBackingImage lists all Nodes in the given namespace and
 // returns the first Node && the first Disk of the Node marked with condition ready and allow scheduling
-func (s *DataStore) GetRandomReadyNodeDisk() (*longhorn.Node, string, error) {
+func (s *DataStore) GetReadyNodeDiskForBackingImage(backingImage *longhorn.BackingImage, usedDisks map[string]bool) (*longhorn.Node, string, error) {
 	logrus.Info("Preparing to find a random ready node disk")
 	nodes, err := s.ListNodesRO()
 	if err != nil {
 		return nil, "", errors.Wrapf(err, "failed to get random ready node disk")
 	}
 
+	allowEmptyNodeSelectorVolume, err := s.GetSettingAsBool(types.SettingNameAllowEmptyNodeSelectorVolume)
+	if err != nil {
+		return nil, "", errors.Wrapf(err, "failed to get %v setting", types.SettingNameAllowEmptyNodeSelectorVolume)
+	}
+
+	allowEmptyDiskSelectorVolume, err := s.GetSettingAsBool(types.SettingNameAllowEmptyDiskSelectorVolume)
+	if err != nil {
+		return nil, "", errors.Wrapf(err, "failed to get %v setting", types.SettingNameAllowEmptyDiskSelectorVolume)
+	}
+
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	r.Shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })
 	for _, node := range nodes {
+		if !types.IsSelectorsInTags(node.Spec.Tags, backingImage.Spec.NodeSelector, allowEmptyNodeSelectorVolume) {
+			continue
+		}
+
 		if !node.Spec.AllowScheduling {
 			continue
 		}
@@ -2664,6 +2678,12 @@ func (s *DataStore) GetRandomReadyNodeDisk() (*longhorn.Node, string, error) {
 			if !exists {
 				continue
 			}
+			if !types.IsSelectorsInTags(diskSpec.Tags, backingImage.Spec.DiskSelector, allowEmptyDiskSelectorVolume) {
+				continue
+			}
+			if _, exists := usedDisks[diskStatus.DiskUUID]; exists {
+				continue
+			}
 			// TODO: Jack add block type disk for spdk version BackingImage
 			if diskSpec.Type != longhorn.DiskTypeFilesystem {
 				continue
@@ -4952,6 +4972,22 @@ func (s *DataStore) IsV2DataEngineDisabledForNode(nodeName string) (bool, error)
 	return false, nil
 }
 
+func (s *DataStore) GetDiskBackingImageMap(node *longhorn.Node) (map[string][]*longhorn.BackingImage, error) {
+	diskBackingImageMap := map[string][]*longhorn.BackingImage{}
+	backingImages, err := s.ListBackingImages()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, bi := range backingImages {
+		for diskUUID := range bi.Status.DiskFileStatusMap {
+			diskBackingImageMap[diskUUID] = append(diskBackingImageMap[diskUUID], bi)
+		}
+	}
+
+	return diskBackingImageMap, nil
+}
+
 // CreateBackupBackingImage creates a Longhorn BackupBackingImage resource and verifies
 func (s *DataStore) CreateBackupBackingImage(backupBackingImage *longhorn.BackupBackingImage) (*longhorn.BackupBackingImage, error) {
 	ret, err := s.lhClient.LonghornV1beta2().BackupBackingImages(s.namespace).Create(context.TODO(), backupBackingImage, metav1.CreateOptions{})
diff --git a/k8s/crds.yaml b/k8s/crds.yaml
index 7dd3cab138..30c65269bc 100644
--- a/k8s/crds.yaml
+++ b/k8s/crds.yaml
@@ -441,10 +441,20 @@ spec:
             properties:
               checksum:
                 type: string
+              diskSelector:
+                items:
+                  type: string
+                type: array
              disks:
                 additionalProperties:
                   type: string
                 type: object
+              minNumberOfCopies:
+                type: integer
+              nodeSelector:
+                items:
+                  type: string
+                type: array
               sourceParameters:
                 additionalProperties:
                   type: string
@@ -465,6 +475,8 @@ spec:
             diskFileStatusMap:
               additionalProperties:
                 properties:
+                  evictionRequested:
+                    type: boolean
                   lastStateTransitionTime:
                     type: string
                   message:
diff --git a/k8s/generate_code.sh b/k8s/generate_code.sh
index 7d8ef7970b..5374b12b2b 100755
--- a/k8s/generate_code.sh
+++ b/k8s/generate_code.sh
@@ -37,14 +37,14 @@ if [[ ! -d "${GOPATH}/src/k8s.io/code-generator" ]]; then
 fi
 
 # https://github.com/kubernetes-sigs/controller-tools/tree/v0.7.0/cmd/controller-gen
-if ! command -v controller-gen > /dev/null; then
+if ! command -v ${GOPATH}/bin/controller-gen > /dev/null; then
   echo "controller-gen is missing"
   echo "Prepare to install controller-gen"
   go install sigs.k8s.io/controller-tools/cmd/controller-gen@${CONTROLLER_TOOLS_VERSION}
 fi
 
 # https://github.com/kubernetes-sigs/kustomize/tree/kustomize/v3.10.0/kustomize
-if ! command -v kustomize > /dev/null; then
+if ! command -v ${GOPATH}/bin/kustomize > /dev/null; then
   echo "kustomize is missing"
   echo "Prepare to install kustomize"
   mkdir -p ${GOPATH}/src/github.com/kubernetes-sigs
@@ -65,16 +65,16 @@ bash ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \
   $@
 
 echo Generating CRD
-controller-gen crd paths=${APIS_DIR}/... output:crd:dir=${CRDS_DIR}
+${GOPATH}/bin/controller-gen crd paths=${APIS_DIR}/... output:crd:dir=${CRDS_DIR}
 
 pushd ${CRDS_DIR}
-kustomize create --autodetect 2>/dev/null || true
-kustomize edit add label longhorn-manager: 2>/dev/null || true
+${GOPATH}/bin/kustomize create --autodetect 2>/dev/null || true
+${GOPATH}/bin/kustomize edit add label longhorn-manager: 2>/dev/null || true
 if [ -e ${GOPATH}/src/${LH_MANAGER_DIR}/k8s/patches/crd ]; then
   cp -a ${GOPATH}/src/${LH_MANAGER_DIR}/k8s/patches/crd patches
-  find patches -type f | xargs -i sh -c 'kustomize edit add patch --path {}'
+  find patches -type f | xargs -i sh -c '${GOPATH}/bin/kustomize edit add patch --path {}'
 fi
 popd
 
 echo "# Generated by the CRDs from ${APIS_DIR}" > ${GOPATH}/src/${LH_MANAGER_DIR}/k8s/crds.yaml
-kustomize build ${CRDS_DIR} >> ${GOPATH}/src/${LH_MANAGER_DIR}/k8s/crds.yaml
+${GOPATH}/bin/kustomize build ${CRDS_DIR} >> ${GOPATH}/src/${LH_MANAGER_DIR}/k8s/crds.yaml
 rm -r ${CRDS_DIR}
diff --git a/k8s/pkg/apis/longhorn/v1beta2/backingimage.go b/k8s/pkg/apis/longhorn/v1beta2/backingimage.go
index a68fc3e2d4..99272f99e0 100644
--- a/k8s/pkg/apis/longhorn/v1beta2/backingimage.go
+++ b/k8s/pkg/apis/longhorn/v1beta2/backingimage.go
@@ -34,6 +34,8 @@ type BackingImageDiskFileStatus struct {
 	Message string `json:"message"`
 	// +optional
 	LastStateTransitionTime string `json:"lastStateTransitionTime"`
+	// +optional
+	EvictionRequested bool `json:"evictionRequested"`
 }
 
 // BackingImageSpec defines the desired state of the Longhorn backing image
@@ -46,6 +48,12 @@ type BackingImageSpec struct {
 	SourceType BackingImageDataSourceType `json:"sourceType"`
 	// +optional
 	SourceParameters map[string]string `json:"sourceParameters"`
+	// +optional
+	MinNumberOfCopies int `json:"minNumberOfCopies"`
+	// +optional
+	DiskSelector []string `json:"diskSelector"`
+	// +optional
+	NodeSelector []string `json:"nodeSelector"`
 }
 
 // BackingImageStatus defines the observed state of the Longhorn backing image status
diff --git a/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go b/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go
index 48cd18dada..457dc968d1 100644
--- a/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go
+++ b/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go
@@ -393,6 +393,16 @@ func (in *BackingImageSpec) DeepCopyInto(out *BackingImageSpec) {
 			(*out)[key] = val
 		}
 	}
+	if in.DiskSelector != nil {
+		in, out := &in.DiskSelector, &out.DiskSelector
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
diff --git a/types/setting.go b/types/setting.go
index 5e8cbc3923..42ba868d11 100644
--- a/types/setting.go
+++ b/types/setting.go
@@ -128,6 +128,7 @@ const (
 	SettingNameV2DataEngineGuaranteedInstanceManagerCPU = SettingName("v2-data-engine-guaranteed-instance-manager-cpu")
 	SettingNameV2DataEngineLogLevel                     = SettingName("v2-data-engine-log-level")
 	SettingNameV2DataEngineLogFlags                     = SettingName("v2-data-engine-log-flags")
+	SettingNameDefaultMinNumberOfBackingImageCopies     = SettingName("default-min-number-of-backing-image-copies")
 )
 
 var (
@@ -324,6 +325,7 @@ var (
 		SettingNameAllowEmptyNodeSelectorVolume:         SettingDefinitionAllowEmptyNodeSelectorVolume,
 		SettingNameAllowEmptyDiskSelectorVolume:         SettingDefinitionAllowEmptyDiskSelectorVolume,
 		SettingNameDisableSnapshotPurge:                 SettingDefinitionDisableSnapshotPurge,
+		SettingNameDefaultMinNumberOfBackingImageCopies: SettingDefinitionDefaultMinNumberOfBackingImageCopies,
 	}
 
 	SettingDefinitionBackupTarget = SettingDefinition{
@@ -1352,6 +1354,19 @@ var (
 		ReadOnly:    false,
 		Default:     "",
 	}
+
+	SettingDefinitionDefaultMinNumberOfBackingImageCopies = SettingDefinition{
+		DisplayName: "Default Minimum Number of Backing Image Copies",
+		Description: "The default minimum number of backing image copies Longhorn maintains",
+		Category:    SettingCategoryGeneral,
+		Type:        SettingTypeInt,
+		Required:    true,
+		ReadOnly:    false,
+		Default:     "1",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1,
+		},
+	}
 )
 
 type NodeDownPodDeletionPolicy string
diff --git a/types/types.go b/types/types.go
index 7c6996d0c5..22e07339cd 100644
--- a/types/types.go
+++ b/types/types.go
@@ -735,6 +735,17 @@ func ValidateReplicaCount(count int) error {
 	return nil
 }
 
+func ValidateMinNumberOfBackingImageCopies(number int) error {
+	definition, _ := GetSettingDefinition(SettingNameDefaultMinNumberOfBackingImageCopies)
+	valueIntRange := definition.ValueIntRange
+
+	if number < valueIntRange[ValueIntRangeMinimum] {
+		return fmt.Errorf("minimum number of backing image copies %v must not be less than %v", number, valueIntRange[ValueIntRangeMinimum])
+	}
+	return nil
+}
+
 func ValidateV2DataEngineLogLevel(level string) error {
 	switch strings.ToLower(level) {
 	case "disabled", "error", "warn", "notice", "info", "debug":
diff --git a/webhook/resources/backingimage/mutator.go b/webhook/resources/backingimage/mutator.go
index 31e560bcb8..dba970166a 100644
--- a/webhook/resources/backingimage/mutator.go
+++ b/webhook/resources/backingimage/mutator.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 
 	"k8s.io/apimachinery/pkg/runtime"
 
@@ -108,6 +109,17 @@ func (b *backingImageMutator) Create(request *admission.Request, newObj runtime.
 		err := errors.Wrapf(err, "failed to get label patch for backingImage %v", backingImage.Name)
 		return nil, werror.NewInvalidError(err.Error(), "")
 	}
+
+	if backingImage.Spec.MinNumberOfCopies == 0 {
+		minNumberOfCopies, err := b.getDefaultMinNumberOfBackingImageCopies()
+		if err != nil {
+			err = errors.Wrap(err, "BUG: cannot get valid number for setting default min number of backing image copies")
+			return nil, werror.NewInvalidError(err.Error(), "")
+		}
+		logrus.Infof("Using the default minimum number of backing image copies %v", minNumberOfCopies)
+		patchOps = append(patchOps, fmt.Sprintf(`{"op": "replace", "path": "/spec/minNumberOfCopies", "value": %v}`, minNumberOfCopies))
+	}
+
 	patchOps = append(patchOps, patchOp)
 
 	return patchOps, nil
@@ -158,3 +170,11 @@
 
 	return patchOps, nil
 }
+
+func (b *backingImageMutator) getDefaultMinNumberOfBackingImageCopies() (int, error) {
+	c, err := b.ds.GetSettingAsInt(types.SettingNameDefaultMinNumberOfBackingImageCopies)
+	if err != nil {
+		return 0, err
+	}
+	return int(c), nil
+}
diff --git a/webhook/resources/backingimage/validator.go b/webhook/resources/backingimage/validator.go
index c20bf3cff6..0dd6bce02a 100644
--- a/webhook/resources/backingimage/validator.go
+++ b/webhook/resources/backingimage/validator.go
@@ -56,6 +56,10 @@ func (b *backingImageValidator) Create(request *admission.Request, newObj runtim
 		}
 	}
 
+	if err := validateMinNumberOfBackingImageCopies(backingImage.Spec.MinNumberOfCopies); err != nil {
+		return werror.NewInvalidError(err.Error(), "")
+	}
+
 	switch longhorn.BackingImageDataSourceType(backingImage.Spec.SourceType) {
 	case longhorn.BackingImageDataSourceTypeDownload:
 		if backingImage.Spec.SourceParameters[longhorn.DataSourceTypeDownloadParameterURL] == "" {
@@ -92,6 +96,23 @@ func (b *backingImageValidator) Create(request *admission.Request, newObj runtim
 	return nil
 }
 
+func (b *backingImageValidator) Update(request *admission.Request, oldObj runtime.Object, newObj runtime.Object) error {
+	_, ok := oldObj.(*longhorn.BackingImage)
+	if !ok {
+		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.BackingImage", oldObj), "")
+	}
+	newBackingImage, ok := newObj.(*longhorn.BackingImage)
+	if !ok {
+		return werror.NewInvalidError(fmt.Sprintf("%v is not a *longhorn.BackingImage", newObj), "")
+	}
+
+	if err := validateMinNumberOfBackingImageCopies(newBackingImage.Spec.MinNumberOfCopies); err != nil {
+		return werror.NewInvalidError(err.Error(), "")
+	}
+
+	return nil
+}
+
 func (b *backingImageValidator) Delete(request *admission.Request, oldObj runtime.Object) error {
 	backingImage, ok := oldObj.(*longhorn.BackingImage)
 	if !ok {
@@ -107,3 +128,10 @@ func (b *backingImageValidator) Delete(request *admission.Request, oldObj runtim
 	}
 	return nil
 }
+
+func validateMinNumberOfBackingImageCopies(number int) error {
+	if err := types.ValidateMinNumberOfBackingImageCopies(number); err != nil {
+		return werror.NewInvalidError(err.Error(), "")
+	}
+	return nil
+}