diff --git a/go.mod b/go.mod
index bd817d2e9..6c2c040b9 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,8 @@ require (
 	github.com/RoaringBitmap/roaring v0.4.18
 	github.com/golang/protobuf v1.3.3-0.20190920234318-1680a479a2cf
 	github.com/google/uuid v1.3.0
-	github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3
-	github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b // indirect
-	github.com/longhorn/longhorn-engine v1.3.2-0.20220929032851-7aac8ae9c8b4
+	github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a
+	github.com/longhorn/longhorn-engine v1.3.3-0.20230216042703-718990dc8a35
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.8.1
 	github.com/tinylib/msgp v1.1.1-0.20190612170807-0573788bc2a8 // indirect
diff --git a/go.sum b/go.sum
index 962561066..055becc6f 100644
--- a/go.sum
+++ b/go.sum
@@ -46,14 +46,12 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a h1:f+mLqp3A5M7plw1pBgf8K1nvJxSU7mrGtU7bii+W5Bk=
 github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a/go.mod h1:hvIVsrpjPey7KupirAh0WoPMg0ArWnE6fA5bI30X7AI=
-github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3 h1:CDUWRyBlxV62OvuiBIQpMhHytDoqr3M3nvoXbFhhHBE=
-github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3/go.mod h1:73zwYfMYt/JacoG4y86p3Ct6MEzIhHtbxo02AAO+TmA=
-github.com/longhorn/go-iscsi-helper v0.0.0-20220805034259-7b59e22574bb/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY=
-github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b h1:nGWLOG/u+hT1aQnygRavj31JxPwfig/Ts8EnqUVEW6o=
-github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY=
-github.com/longhorn/longhorn-engine v1.3.2-0.20220929032851-7aac8ae9c8b4 h1:hNdnSDe6vRZ3uIi6rZIsVITdXAR0G86gLIOJVQtICCo=
-github.com/longhorn/longhorn-engine v1.3.2-0.20220929032851-7aac8ae9c8b4/go.mod h1:AC5smEVsxdJKI+joa0vSUPLd0Cp/xEQiqAsh3UM3UfY=
+github.com/longhorn/go-iscsi-helper v0.0.0-20230215045129-588aa7586e4c h1:M4UR1cVrVHHKqwc4aL3MUlp1d/Z2i7T7VS1y0SSUwa8=
+github.com/longhorn/go-iscsi-helper v0.0.0-20230215045129-588aa7586e4c/go.mod h1:9z/y9glKmWEdV50tjlUPxFwi1goQfIrrsoZbnMyIZbY=
+github.com/longhorn/longhorn-engine v1.3.3-0.20230216042703-718990dc8a35 h1:3RgCpW+8XGrUq3U5pSH1cDszVu6gIWhGs2akEEdtAZc=
+github.com/longhorn/longhorn-engine v1.3.3-0.20230216042703-718990dc8a35/go.mod h1:rFBcnmpcs6un/I1InIzb18kPni64pH2iclEyoGrdXO0=
 github.com/longhorn/nsfilelock v0.0.0-20200723175406-fa7c83ad0003/go.mod h1:0CLeXlf59Lg6C0kjLSDf47ft73Dh37CwymYRKWwAn04=
 github.com/longhorn/sparse-tools v0.0.0-20220323120706-0bd9b4129826 h1:8IeuJT9y0xFGSfSl9dOG/L6dJOGtIrS3nYqgo3eYzao=
 github.com/longhorn/sparse-tools v0.0.0-20220323120706-0bd9b4129826/go.mod h1:BWM7yTPb1DulG18EE/Jy20LVIySzIYoZpiOYFtAGwZo=
@@ -65,8 +63,6 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbM
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
 github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
-github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/vendor/github.com/longhorn/backupstore/Dockerfile.dapper b/vendor/github.com/longhorn/backupstore/Dockerfile.dapper
index c6c3f58b6..b965fb96a 100644
--- a/vendor/github.com/longhorn/backupstore/Dockerfile.dapper
+++ b/vendor/github.com/longhorn/backupstore/Dockerfile.dapper
@@ -19,7 +19,7 @@ ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH=GOLANG_ARCH_${ARC
     GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash
 
 RUN wget -O - https://storage.googleapis.com/golang/go1.13.3.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local && \
-    go get github.com/rancher/trash && GO111MODULE=on go get golang.org/x/lint/golint@v0.0.0-20210508222113-6edffad5e616
+    go get github.com/rancher/trash && go get -u golang.org/x/lint/golint
 
 ENV DAPPER_SOURCE /go/src/github.com/longhorn/backupstore
 ENV DAPPER_OUTPUT ./bin
diff --git a/vendor/github.com/longhorn/backupstore/backupstore.go b/vendor/github.com/longhorn/backupstore/backupstore.go
index 01fe59ecf..3321066fb 100644
--- a/vendor/github.com/longhorn/backupstore/backupstore.go
+++ b/vendor/github.com/longhorn/backupstore/backupstore.go
@@ -3,10 +3,8 @@ package backupstore
 import (
 	"fmt"
 	"net/url"
-	"sync"
 
 	"github.com/longhorn/backupstore/util"
-	"github.com/pkg/errors"
 )
 
 type Volume struct {
@@ -19,7 +17,6 @@ type Volume struct {
 	BlockCount           int64  `json:",string"`
 	BackingImageName     string `json:",string"`
 	BackingImageChecksum string `json:",string"`
-	CompressionMethod    string `json:",string"`
 }
 
 type Snapshot struct {
@@ -27,13 +24,7 @@ type Snapshot struct {
 	CreatedTime string
 }
 
-type ProcessingBlocks struct {
-	sync.Mutex
-	blocks map[string][]*BlockMapping
-}
-
 type Backup struct {
-	sync.Mutex
 	Name              string
 	VolumeName        string
 	SnapshotName      string
@@ -42,9 +33,6 @@ type Backup struct {
 	Size              int64 `json:",string"`
 	Labels            map[string]string
 	IsIncremental     bool
-	CompressionMethod string
-
-	ProcessingBlocks *ProcessingBlocks
 
 	Blocks     []BlockMapping `json:",omitempty"`
 	SingleFile BackupFile     `json:",omitempty"`
@@ -62,27 +50,27 @@ func GetBackupstoreBase() string {
 	return backupstoreBase
 }
 
-func addVolume(driver BackupStoreDriver, volume *Volume) error {
-	if volumeExists(driver, volume.Name) {
+func addVolume(volume *Volume, driver BackupStoreDriver) error {
+	if volumeExists(volume.Name, driver) {
 		return nil
 	}
 
 	if !util.ValidateName(volume.Name) {
-		return fmt.Errorf("invalid volume name %v", volume.Name)
+		return fmt.Errorf("Invalid volume name %v", volume.Name)
 	}
 
-	if err := saveVolume(driver, volume); err != nil {
-		log.WithError(err).Errorf("Failed to add volume %v", volume.Name)
+	if err := saveVolume(volume, driver); err != nil {
+		log.Error("Fail add volume ", volume.Name)
 		return err
 	}
+	log.Debug("Added backupstore volume ", volume.Name)
 
-	log.Infof("Added backupstore volume %v", volume.Name)
 	return nil
 }
 
 func removeVolume(volumeName string, driver BackupStoreDriver) error {
 	if !util.ValidateName(volumeName) {
-		return fmt.Errorf("invalid volume name %v", volumeName)
+		return fmt.Errorf("Invalid volume name %v", volumeName)
 	}
 
 	volumeDir := getVolumePath(volumeName)
@@ -90,20 +78,20 @@ func removeVolume(volumeName string, driver BackupStoreDriver) error {
 	volumeBackupsDirectory := getBackupPath(volumeName)
 	volumeLocksDirectory := getLockPath(volumeName)
 	if err := driver.Remove(volumeBackupsDirectory); err != nil {
-		return errors.Wrapf(err, "failed to remove all the backups for volume %v", volumeName)
+		return fmt.Errorf("failed to remove all the backups for volume %v: %v", volumeName, err)
 	}
 	if err := driver.Remove(volumeBlocksDirectory); err != nil {
-		return errors.Wrapf(err, "failed to remove all the blocks for volume %v", volumeName)
+		return fmt.Errorf("failed to remove all the blocks for volume %v: %v", volumeName, err)
 	}
 	if err := driver.Remove(volumeLocksDirectory); err != nil {
-		return errors.Wrapf(err, "failed to remove all the locks for volume %v", volumeName)
+		return fmt.Errorf("failed to remove all the locks for volume %v: %v", volumeName, err)
 	}
 	if err := driver.Remove(volumeDir); err != nil {
-		return errors.Wrapf(err, "failed to remove backup volume %v directory in backupstore", volumeName)
+		return fmt.Errorf("failed to remove backup volume %v directory in backupstore: %v", volumeName, err)
 	}
 
-	log.Infof("Removed volume directory in backupstore %v", volumeDir)
-	log.Infof("Removed backupstore volume %v", volumeName)
+	log.Debug("Removed volume directory in backupstore: ", volumeDir)
+	log.Debug("Removed backupstore volume ", volumeName)
 	return nil
 }
 
@@ -126,10 +114,10 @@ func DecodeBackupURL(backupURL string) (string, string, string, error) {
 	volumeName := v.Get("volume")
 	backupName := v.Get("backup")
 	if !util.ValidateName(volumeName) {
-		return "", "", "", fmt.Errorf("invalid volume name parsed, got %v", volumeName)
+		return "", "", "", fmt.Errorf("Invalid volume name parsed, got %v", volumeName)
 	}
 	if backupName != "" && !util.ValidateName(backupName) {
-		return "", "", "", fmt.Errorf("invalid backup name parsed, got %v", backupName)
+		return "", "", "", fmt.Errorf("Invalid backup name parsed, got %v", backupName)
 	}
 	u.RawQuery = ""
 	destURL := u.String()
@@ -145,5 +133,5 @@ func LoadVolume(backupURL string) (*Volume, error) {
 	if err != nil {
 		return nil, err
 	}
-	return loadVolume(driver, volumeName)
+	return loadVolume(volumeName, driver)
 }
diff --git a/vendor/github.com/longhorn/backupstore/config.go b/vendor/github.com/longhorn/backupstore/config.go
index 2fa1a2f89..9e38d3af7 100644
--- a/vendor/github.com/longhorn/backupstore/config.go
+++ b/vendor/github.com/longhorn/backupstore/config.go
@@ -33,7 +33,7 @@ func getBackupConfigName(id string) string {
 	return BACKUP_CONFIG_PREFIX + id + CFG_SUFFIX
 }
 
-func LoadConfigInBackupStore(driver BackupStoreDriver, filePath string, v interface{}) error {
+func loadConfigInBackupStore(filePath string, driver BackupStoreDriver, v interface{}) error {
 	if !driver.FileExists(filePath) {
 		return fmt.Errorf("cannot find %v in backupstore", filePath)
 	}
@@ -61,7 +61,7 @@ func LoadConfigInBackupStore(driver BackupStoreDriver, filePath string, v interf
 	return nil
 }
 
-func SaveConfigInBackupStore(driver BackupStoreDriver, filePath string, v interface{}) error {
+func saveConfigInBackupStore(filePath string, driver BackupStoreDriver, v interface{}) error {
 	j, err := json.Marshal(v)
 	if err != nil {
 		return err
 	}
@@ -84,51 +84,9 @@ func SaveConfigInBackupStore(driver BackupStoreDriver, filePath string, v interf
 	return nil
 }
 
-func SaveLocalFileToBackupStore(localFilePath, backupStoreFilePath string, driver BackupStoreDriver) error {
-	log := log.WithFields(logrus.Fields{
-		LogFieldReason:   LogReasonStart,
-		LogFieldObject:   LogObjectConfig,
-		LogFieldKind:     driver.Kind(),
-		LogFieldFilepath: localFilePath,
-		LogFieldDestURL:  backupStoreFilePath,
-	})
-	log.Debug()
-
-	if driver.FileExists(backupStoreFilePath) {
-		return fmt.Errorf("%v already exists", backupStoreFilePath)
-	}
-
-	if err := driver.Upload(localFilePath, backupStoreFilePath); err != nil {
-		return err
-	}
-
-	log.WithField(LogFieldReason, LogReasonComplete).Debug()
-	return nil
-}
-
-func SaveBackupStoreToLocalFile(driver BackupStoreDriver, backupStoreFileURL, localFilePath string) error {
-	log := log.WithFields(logrus.Fields{
-		LogFieldReason:    LogReasonStart,
-		LogFieldObject:    LogObjectConfig,
-		LogFieldKind:      driver.Kind(),
-		LogFieldFilepath:  localFilePath,
-		LogFieldSourceURL: backupStoreFileURL,
-	})
-	log.Debug()
-
-	if err := driver.Download(backupStoreFileURL, localFilePath); err != nil {
-		return err
-	}
-
-	log = log.WithFields(logrus.Fields{
-		LogFieldReason: LogReasonComplete,
-	})
-	log.Debug()
-	return nil
-}
-
-func volumeExists(driver BackupStoreDriver, volumeName string) bool {
-	return driver.FileExists(getVolumeFilePath(volumeName))
+func volumeExists(volumeName string, driver BackupStoreDriver) bool {
+	volumeFile := getVolumeFilePath(volumeName)
+	return driver.FileExists(volumeFile)
 }
 
 func getVolumePath(volumeName string) string {
@@ -150,7 +108,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat
 	volumePathBase := filepath.Join(backupstoreBase, VOLUME_DIRECTORY)
 	lv1Dirs, err := driver.List(volumePathBase)
 	if err != nil {
-		log.WithError(err).Warnf("Failed to list first level dirs for path %v", volumePathBase)
+		log.Warnf("failed to list first level dirs for path: %v reason: %v", volumePathBase, err)
 		return names, err
 	}
@@ -164,7 +122,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat
 		lv1Tracker := jobQueues.QueueTimedFunc(context.Background(), func(ctx context.Context) (interface{}, error) {
 			lv2Dirs, err := driver.List(path)
 			if err != nil {
-				log.WithError(err).Warnf("Failed to list second level dirs for path %v", path)
+				log.Warnf("failed to list second level dirs for path: %v reason: %v", path, err)
 				return nil, err
 			}
@@ -190,7 +148,7 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat
 		lv2Tracker := jobQueues.QueueTimedFunc(context.Background(), func(ctx context.Context) (interface{}, error) {
 			volumeNames, err := driver.List(path)
 			if err != nil {
-				log.WithError(err).Warnf("Failed to list volume names for path %v", path)
+				log.Warnf("failed to list volume names for path: %v reason: %v", path, err)
 				return nil, err
 			}
 			return volumeNames, nil
@@ -214,25 +172,24 @@ func getVolumeNames(jobQueues *jobq.WorkerDispatcher, jobQueueTimeout time.Durat
 	return names, nil
 }
 
-func loadVolume(driver BackupStoreDriver, volumeName string) (*Volume, error) {
+func loadVolume(volumeName string, driver BackupStoreDriver) (*Volume, error) {
 	v := &Volume{}
 	file := getVolumeFilePath(volumeName)
-	if err := LoadConfigInBackupStore(driver, file, v); err != nil {
+	if err := loadConfigInBackupStore(file, driver, v); err != nil {
 		return nil, err
 	}
-	// Backward compatibility
-	if v.CompressionMethod == "" {
-		log.Infof("Fall back compression method to %v for volume %v", LEGACY_COMPRESSION_METHOD, v.Name)
-		v.CompressionMethod = LEGACY_COMPRESSION_METHOD
-	}
 	return v, nil
 }
 
-func saveVolume(driver BackupStoreDriver, v *Volume) error {
-	return SaveConfigInBackupStore(driver, getVolumeFilePath(v.Name), v)
+func saveVolume(v *Volume, driver BackupStoreDriver) error {
+	file := getVolumeFilePath(v.Name)
+	if err := saveConfigInBackupStore(file, driver, v); err != nil {
+		return err
+	}
+	return nil
 }
 
-func getBackupNamesForVolume(driver BackupStoreDriver, volumeName string) ([]string, error) {
+func getBackupNamesForVolume(volumeName string, driver BackupStoreDriver) ([]string, error) {
 	result := []string{}
 	fileList, err := driver.List(getBackupPath(volumeName))
 	if err != nil {
@@ -256,25 +213,27 @@ func isBackupInProgress(backup *Backup) bool {
 	return backup != nil && backup.CreatedTime == ""
 }
 
-func loadBackup(bsDriver BackupStoreDriver, backupName, volumeName string) (*Backup, error) {
+func backupExists(backupName, volumeName string, bsDriver BackupStoreDriver) bool {
+	return bsDriver.FileExists(getBackupConfigPath(backupName, volumeName))
+}
+
+func loadBackup(backupName, volumeName string, bsDriver BackupStoreDriver) (*Backup, error) {
 	backup := &Backup{}
-	if err := LoadConfigInBackupStore(bsDriver, getBackupConfigPath(backupName, volumeName), backup); err != nil {
+	if err := loadConfigInBackupStore(getBackupConfigPath(backupName, volumeName), bsDriver, backup); err != nil {
 		return nil, err
 	}
-	// Backward compatibility
-	if backup.CompressionMethod == "" {
-		log.Infof("Fall back compression method to %v for backup %v", LEGACY_COMPRESSION_METHOD, backup.Name)
-		backup.CompressionMethod = LEGACY_COMPRESSION_METHOD
-	}
 	return backup, nil
 }
 
-func saveBackup(bsDriver BackupStoreDriver, backup *Backup) error {
+func saveBackup(backup *Backup, bsDriver BackupStoreDriver) error {
 	if backup.VolumeName == "" {
 		return fmt.Errorf("missing volume specifier for backup: %v", backup.Name)
 	}
 	filePath := getBackupConfigPath(backup.Name, backup.VolumeName)
-	return SaveConfigInBackupStore(bsDriver, filePath, backup)
+	if err := saveConfigInBackupStore(filePath, bsDriver, backup); err != nil {
+		return err
+	}
+	return nil
 }
 
 func removeBackup(backup *Backup, bsDriver BackupStoreDriver) error {
@@ -282,6 +241,6 @@ func removeBackup(backup *Backup, bsDriver BackupStoreDriver) error {
 	if err := bsDriver.Remove(filePath); err != nil {
 		return err
 	}
-	log.Infof("Removed %v on backupstore", filePath)
+	log.Debugf("Removed %v on backupstore", filePath)
 	return nil
 }
diff --git a/vendor/github.com/longhorn/backupstore/deltablock.go b/vendor/github.com/longhorn/backupstore/deltablock.go
index 99a8ecb94..28bd1b723 100644
--- a/vendor/github.com/longhorn/backupstore/deltablock.go
+++ b/vendor/github.com/longhorn/backupstore/deltablock.go
@@ -1,16 +1,12 @@
 package backupstore
 
 import (
-	"context"
 	"fmt"
 	"io"
 	"os"
 	"path/filepath"
-	"sync"
-	"syscall"
 	"time"
 
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 
 	. "github.com/longhorn/backupstore/logging"
@@ -18,21 +14,19 @@ import (
 )
 
 type DeltaBackupConfig struct {
-	BackupName      string
-	Volume          *Volume
-	Snapshot        *Snapshot
-	DestURL         string
-	DeltaOps        DeltaBlockBackupOperations
-	Labels          map[string]string
-	ConcurrentLimit int32
+	BackupName string
+	Volume     *Volume
+	Snapshot   *Snapshot
+	DestURL    string
+	DeltaOps   DeltaBlockBackupOperations
+	Labels     map[string]string
 }
 
 type DeltaRestoreConfig struct {
-	BackupURL       string
-	DeltaOps        DeltaRestoreOperations
-	LastBackupName  string
-	Filename        string
-	ConcurrentLimit int32
+	BackupURL      string
+	DeltaOps       DeltaRestoreOperations
+	LastBackupName string
+	Filename       string
 }
 
 type BlockMapping struct {
@@ -94,8 +88,7 @@ type DeltaRestoreOperations interface {
 }
 
 const (
-	DEFAULT_BLOCK_SIZE        = 2 * 1024 * 1024
-	LEGACY_COMPRESSION_METHOD = "gzip"
+	DEFAULT_BLOCK_SIZE = 2097152
 
 	BLOCKS_DIRECTORY      = "blocks"
 	BLOCK_SEPARATE_LAYER1 = 2
@@ -106,9 +99,9 @@ const (
 	PROGRESS_PERCENTAGE_BACKUP_TOTAL = 100
 )
 
-func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncremental bool, err error) {
+func CreateDeltaBlockBackup(config *DeltaBackupConfig) (string, bool, error) {
 	if config == nil {
-		return "", false, fmt.Errorf("invalid empty config for backup")
+		return "", false, fmt.Errorf("Invalid empty config for backup")
 	}
 
 	volume := config.Volume
@@ -116,7 +109,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 	destURL := config.DestURL
 	deltaOps := config.DeltaOps
 	if deltaOps == nil {
-		return "", false, fmt.Errorf("missing DeltaBlockBackupOperations")
+		return "", false, fmt.Errorf("Missing DeltaBlockBackupOperations")
 	}
 
 	bsDriver, err := GetBackupStoreDriver(destURL)
@@ -134,18 +127,16 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 		return "", false, err
 	}
 
-	if err := addVolume(bsDriver, volume); err != nil {
+	if err := addVolume(volume, bsDriver); err != nil {
 		return "", false, err
 	}
 
 	// Update volume from backupstore
-	volume, err = loadVolume(bsDriver, volume.Name)
+	volume, err = loadVolume(volume.Name, bsDriver)
 	if err != nil {
 		return "", false, err
 	}
 
-	config.Volume.CompressionMethod = volume.CompressionMethod
-
 	if err := deltaOps.OpenSnapshot(snapshot.Name, volume.Name); err != nil {
 		return "", false, err
 	}
@@ -153,7 +144,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 	backupRequest := &backupRequest{}
 	if volume.LastBackupName != "" {
 		lastBackupName := volume.LastBackupName
-		var backup, err = loadBackup(bsDriver, lastBackupName, volume.Name)
+		var backup, err = loadBackup(lastBackupName, volume.Name, bsDriver)
 		if err != nil {
 			log.WithFields(logrus.Fields{
 				LogFieldReason: LogReasonFallback,
@@ -162,9 +153,9 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 				LogFieldBackup:  lastBackupName,
 				LogFieldVolume:  volume.Name,
 				LogFieldDestURL: destURL,
-			}).WithError(err).Info("Cannot find previous backup in backupstore")
+			}).Info("Cannot find previous backup in backupstore")
 		} else if backup.SnapshotName == snapshot.Name {
-			// Generate full snapshot if the snapshot has been backed up last time
+			//Generate full snapshot if the snapshot has been backed up last time
 			log.WithFields(logrus.Fields{
 				LogFieldReason: LogReasonFallback,
 				LogFieldEvent:  LogEventCompare,
@@ -191,7 +182,6 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 		LogFieldSnapshot:     snapshot.Name,
 		LogFieldLastSnapshot: backupRequest.getLastSnapshotName(),
 	}).Debug("Generating snapshot changed blocks config")
-
 	delta, err := deltaOps.CompareSnapshot(snapshot.Name, backupRequest.getLastSnapshotName(), volume.Name)
 	if err != nil {
 		deltaOps.CloseSnapshot(snapshot.Name, volume.Name)
@@ -223,14 +213,10 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 	}
 
 	deltaBackup := &Backup{
-		Name:              backupName,
-		VolumeName:        volume.Name,
-		SnapshotName:      snapshot.Name,
-		CompressionMethod: volume.CompressionMethod,
-		Blocks:            []BlockMapping{},
-		ProcessingBlocks: &ProcessingBlocks{
-			blocks: map[string][]*BlockMapping{},
-		},
+		Name:         backupName,
+		VolumeName:   volume.Name,
+		SnapshotName: snapshot.Name,
+		Blocks:       []BlockMapping{},
 	}
 
 	// keep lock alive for async go routine.
@@ -242,7 +228,7 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 		defer deltaOps.CloseSnapshot(snapshot.Name, volume.Name)
 		defer lock.Unlock()
 
-		if progress, backup, err := performBackup(bsDriver, config, delta, deltaBackup, backupRequest.lastBackup); err != nil {
+		if progress, backup, err := performBackup(config, delta, deltaBackup, backupRequest.lastBackup, bsDriver); err != nil {
 			deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, "", err.Error())
 		} else {
 			deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, backup, "")
@@ -251,288 +237,69 @@ func CreateDeltaBlockBackup(config *DeltaBackupConfig) (backupID string, isIncre
 	return deltaBackup.Name, backupRequest.isIncrementalBackup(), nil
 }
 
-func populateMappings(bsDriver BackupStoreDriver, config *DeltaBackupConfig, deltaBackup *Backup, delta *Mappings) (<-chan Mapping, <-chan error) {
-	mappingChan := make(chan Mapping, 1)
-	errChan := make(chan error, 1)
-
-	go func() {
-		defer close(mappingChan)
-		defer close(errChan)
-
-		for _, mapping := range delta.Mappings {
-			mappingChan <- mapping
-		}
-	}()
-
-	return mappingChan, errChan
-}
-
-func getProgress(total, processed int64) int {
-	return int((float64(processed+1) / float64(total)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT)
-}
-
-func isBlockBeingProcessed(deltaBackup *Backup, offset int64, checksum string) bool {
-	processingBlocks := deltaBackup.ProcessingBlocks
-
-	processingBlocks.Lock()
-	defer processingBlocks.Unlock()
-
-	blockInfo := &BlockMapping{
-		Offset:        offset,
-		BlockChecksum: checksum,
-	}
-	if _, ok := processingBlocks.blocks[checksum]; ok {
-		processingBlocks.blocks[checksum] = append(processingBlocks.blocks[checksum], blockInfo)
-		return true
-	}
-
-	processingBlocks.blocks[checksum] = []*BlockMapping{blockInfo}
-	return false
-}
-
-func updateBlocksAndProgress(deltaBackup *Backup, progress *progress, checksum string, newBlock bool) {
-	processingBlocks := deltaBackup.ProcessingBlocks
-
-	processingBlocks.Lock()
-	defer processingBlocks.Unlock()
-
-	// Update deltaBackup.Blocks
-	blocks := processingBlocks.blocks[checksum]
-	for _, block := range blocks {
-		deltaBackup.Blocks = append(deltaBackup.Blocks, *block)
-	}
-
-	// Update progress
-	func() {
-		progress.Lock()
-		defer progress.Unlock()
-
-		if newBlock {
-			progress.newBlockCounts++
-		}
-		progress.processedBlockCounts += int64(len(blocks))
-		progress.progress = getProgress(progress.totalBlockCounts, progress.processedBlockCounts)
-	}()
-
-	delete(processingBlocks.blocks, checksum)
-}
-
-func backupBlock(bsDriver BackupStoreDriver, config *DeltaBackupConfig,
-	deltaBackup *Backup, offset int64, block []byte, progress *progress) error {
-	var err error
-	newBlock := false
-	volume := config.Volume
-	snapshot := config.Snapshot
-	deltaOps := config.DeltaOps
-
-	checksum := util.GetChecksum(block)
-
-	// This prevents multiple goroutines from trying to upload blocks that contain identical contents
-	// with the same checksum but different offsets).
-	// After uploading, `bsDriver.FileExists(blkFile)` is used to avoid repeat uploading.
-	if isBlockBeingProcessed(deltaBackup, offset, checksum) {
-		return nil
-	}
-
-	defer func() {
-		if err != nil {
-			return
-		}
-		deltaBackup.Lock()
-		defer deltaBackup.Unlock()
-		updateBlocksAndProgress(deltaBackup, progress, checksum, newBlock)
-		deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress.progress, "", "")
-	}()
-
-	blkFile := getBlockFilePath(volume.Name, checksum)
-	if bsDriver.FileExists(blkFile) {
-		log.Debugf("Found existing block matching at %v", blkFile)
-		return nil
-	}
+// performBackup if lastBackup is present we will do an incremental backup
+func performBackup(config *DeltaBackupConfig, delta *Mappings, deltaBackup *Backup, lastBackup *Backup,
+	bsDriver BackupStoreDriver) (int, string, error) {
 
-	log.Debugf("Creating new block file at %v", blkFile)
-	newBlock = true
-	rs, err := util.CompressData(deltaBackup.CompressionMethod, block)
-	if err != nil {
-		return err
+	// create an in progress backup config file
+	if err := saveBackup(&Backup{Name: deltaBackup.Name, VolumeName: deltaBackup.VolumeName,
+		CreatedTime: ""}, bsDriver); err != nil {
+		return 0, "", err
 	}
 
-	return bsDriver.Write(blkFile, rs)
-}
-
-func backupMapping(bsDriver BackupStoreDriver, config *DeltaBackupConfig,
-	deltaBackup *Backup, blockSize int64, mapping Mapping, progress *progress) error {
 	volume := config.Volume
 	snapshot := config.Snapshot
+	destURL := config.DestURL
 	deltaOps := config.DeltaOps
 
-	block := make([]byte, DEFAULT_BLOCK_SIZE)
-	blkCounts := mapping.Size / blockSize
-
-	for i := int64(0); i < blkCounts; i++ {
-		log.Debugf("Backup for %v: segment %+v, blocks %v/%v", snapshot.Name, mapping, i+1, blkCounts)
-		offset := mapping.Offset + i*blockSize
-		if err := deltaOps.ReadSnapshot(snapshot.Name, volume.Name, offset, block); err != nil {
-			logrus.WithError(err).Errorf("Failed to read volume %v snapshot %v block at offset %v size %v",
-				volume.Name, snapshot.Name, offset, len(block))
-			return err
-		}
-
-		if err := backupBlock(bsDriver, config, deltaBackup, offset, block, progress); err != nil {
-			logrus.WithError(err).Errorf("Failed to back up volume %v snapshot %v block at offset %v size %v",
-				volume.Name, snapshot.Name, offset, len(block))
-			return err
-		}
-	}
-
-	return nil
-}
-
-func backupMappings(ctx context.Context, bsDriver BackupStoreDriver, config *DeltaBackupConfig,
-	deltaBackup *Backup, blockSize int64, progress *progress, in <-chan Mapping) <-chan error {
-	errChan := make(chan error, 1)
-
-	go func() {
-		defer close(errChan)
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case mapping, open := <-in:
-				if !open {
-					return
-				}
-
-				if err := backupMapping(bsDriver, config, deltaBackup, blockSize, mapping, progress); err != nil {
-					errChan <- err
-					return
-				}
-			}
-		}
-	}()
-
-	return errChan
-}
-
-type progress struct {
-	sync.Mutex
-
-	totalBlockCounts     int64
-	processedBlockCounts int64
-	newBlockCounts       int64
-
-	progress int
-}
-
-func getTotalBackupBlockCounts(delta *Mappings) (int64, error) {
-	totalBlockCounts := int64(0)
-	for _, d := range delta.Mappings {
+	var progress int
+	mCounts := len(delta.Mappings)
+	newBlocks := int64(0)
+	for m, d := range delta.Mappings {
 		if d.Size%delta.BlockSize != 0 {
-			return 0, fmt.Errorf("mapping's size %v is not multiples of backup block size %v",
+			return progress, "", fmt.Errorf("Mapping's size %v is not multiples of backup block size %v",
 				d.Size, delta.BlockSize)
 		}
-		totalBlockCounts += d.Size / delta.BlockSize
-	}
-	return totalBlockCounts, nil
-}
-
-// mergeErrorChannels will merge all error channels into a single error out channel.
-// the error out channel will be closed once the ctx is done or all error channels are closed
-// if there is an error on one of the incoming channels the error will be relayed.
-func mergeErrorChannels(ctx context.Context, channels ...<-chan error) <-chan error {
-	var wg sync.WaitGroup
-	wg.Add(len(channels))
-
-	out := make(chan error, len(channels))
-	output := func(c <-chan error) {
-		defer wg.Done()
-		select {
-		case err, ok := <-c:
-			if ok {
-				out <- err
+		block := make([]byte, DEFAULT_BLOCK_SIZE)
+		blkCounts := d.Size / delta.BlockSize
+		for i := int64(0); i < blkCounts; i++ {
+			offset := d.Offset + i*delta.BlockSize
+			log.Debugf("Backup for %v: segment %v/%v, blocks %v/%v", snapshot.Name, m+1, mCounts, i+1, blkCounts)
+			err := deltaOps.ReadSnapshot(snapshot.Name, volume.Name, offset, block)
+			if err != nil {
+				return progress, "", err
+			}
+			checksum := util.GetChecksum(block)
+			blkFile := getBlockFilePath(volume.Name, checksum)
+			if bsDriver.FileExists(blkFile) {
+				blockMapping := BlockMapping{
+					Offset:        offset,
+					BlockChecksum: checksum,
+				}
+				deltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)
+				log.Debugf("Found existed block match at %v", blkFile)
+				continue
 			}
-			return
-		case <-ctx.Done():
-			return
-		}
-	}
-
-	for _, c := range channels {
-		go output(c)
-	}
-	go func() {
-		wg.Wait()
-		close(out)
-	}()
-	return out
-}
+			rs, err := util.CompressData(block)
+			if err != nil {
+				return progress, "", err
+			}
 
-func sortBackupBlocks(blocks []BlockMapping, volumeSize, blockSize int64) []BlockMapping {
-	sortedBlocks := make([]string, volumeSize/blockSize)
-	for _, block := range blocks {
-		i := block.Offset / blockSize
-		sortedBlocks[i] = block.BlockChecksum
-	}
+			if err := bsDriver.Write(blkFile, rs); err != nil {
+				return progress, "", err
+			}
+			log.Debugf("Created new block file at %v", blkFile)
 
-	blockMappings := []BlockMapping{}
-	for i, checksum := range sortedBlocks {
-		if checksum != "" {
-			blockMappings = append(blockMappings, BlockMapping{
-				Offset:        int64(i) * blockSize,
+			newBlocks++
+			blockMapping := BlockMapping{
+				Offset:        offset,
 				BlockChecksum: checksum,
-			})
+			}
+			deltaBackup.Blocks = append(deltaBackup.Blocks, blockMapping)
 		}
-	}
-
-	return blockMappings
-}
-
-// performBackup if lastBackup is present we will do an incremental backup
-func performBackup(bsDriver BackupStoreDriver, config *DeltaBackupConfig, delta *Mappings, deltaBackup *Backup, lastBackup *Backup) (int, string, error) {
-	volume := config.Volume
-	snapshot := config.Snapshot
-	destURL := config.DestURL
-	concurrentLimit := config.ConcurrentLimit
-
-	// create an in progress backup config file
-	if err := saveBackup(bsDriver, &Backup{
-		Name:              deltaBackup.Name,
-		VolumeName:        deltaBackup.VolumeName,
-		CompressionMethod: volume.CompressionMethod,
-		CreatedTime:       "",
-	}); err != nil {
-		return 0, "", err
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	totalBlockCounts, err := getTotalBackupBlockCounts(delta)
-	if err != nil {
-		return 0, "", err
-	}
-	logrus.Infof("Volume %v Snapshot %v is consist of %v mappings and %v blocks",
-		volume.Name, snapshot.Name, len(delta.Mappings), totalBlockCounts)
-
-	progress := &progress{
-		totalBlockCounts: totalBlockCounts,
-	}
-
-	mappingChan, errChan := populateMappings(bsDriver, config, deltaBackup, delta)
-
-	errorChans := []<-chan error{errChan}
-	for i := 0; i < int(concurrentLimit); i++ {
-		errorChans = append(errorChans, backupMappings(ctx, bsDriver, config,
-			deltaBackup, delta.BlockSize, progress, mappingChan))
-	}
-
-	mergedErrChan := mergeErrorChannels(ctx, errorChans...)
-	err = <-mergedErrChan
-
-	if err != nil {
-		logrus.WithError(err).Errorf("Failed to backup volume %v snapshot %v", volume.Name, snapshot.Name)
-		return progress.progress, "", err
+		progress = int((float64(m+1) / float64(mCounts)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT)
+		deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, progress, "", "")
 	}
 
 	log.WithFields(logrus.Fields{
@@ -540,10 +307,7 @@ func performBackup(bsDriver BackupStoreDriver, config *DeltaBackupConfig, delta
 		LogFieldEvent:    LogEventBackup,
 		LogFieldObject:   LogObjectSnapshot,
 		LogFieldSnapshot: snapshot.Name,
-	}).Infof("Created snapshot changed blocks: %v mappings, %v blocks and %v new blocks",
-		len(delta.Mappings), progress.totalBlockCounts, progress.newBlockCounts)
-
-	deltaBackup.Blocks = sortBackupBlocks(deltaBackup.Blocks, volume.Size, delta.BlockSize)
+	}).Debug("Created snapshot changed blocks")
 
 	backup := mergeSnapshotMap(deltaBackup, lastBackup)
 	backup.SnapshotName = snapshot.Name
@@ -553,27 +317,26 @@ func performBackup(bsDriver BackupStoreDriver, config *DeltaBackupConfig, delta
 	backup.Labels = config.Labels
 	backup.IsIncremental = lastBackup != nil
 
-	if err := saveBackup(bsDriver, backup); err != nil {
-		return progress.progress, "", err
+	if err := saveBackup(backup, bsDriver); err != nil {
+		return progress, "", err
 	}
 
-	volume, err = loadVolume(bsDriver, volume.Name)
+	volume, err := loadVolume(volume.Name, bsDriver)
 	if err != nil {
-		return progress.progress, "", err
+		return progress, "", err
 	}
 
 	volume.LastBackupName = backup.Name
 	volume.LastBackupAt = backup.SnapshotCreatedAt
-	volume.BlockCount = volume.BlockCount + progress.newBlockCounts
+	volume.BlockCount = volume.BlockCount + newBlocks
 	// The volume may be expanded
 	volume.Size = config.Volume.Size
 	volume.Labels = config.Labels
 	volume.BackingImageName = config.Volume.BackingImageName
 	volume.BackingImageChecksum = config.Volume.BackingImageChecksum
-	volume.CompressionMethod = config.Volume.CompressionMethod
 
-	if err := saveVolume(bsDriver, volume); err != nil {
-		return progress.progress, "", err
+	if err := saveVolume(volume, bsDriver); err != nil {
+		return progress, "", err
 	}
 
 	return PROGRESS_PERCENTAGE_BACKUP_TOTAL, EncodeBackupURL(backup.Name, volume.Name, destURL), nil
@@ -584,11 +347,10 @@ func mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup {
 		return deltaBackup
 	}
 	backup := &Backup{
-		Name:              deltaBackup.Name,
-		VolumeName:        deltaBackup.VolumeName,
-		SnapshotName:      deltaBackup.SnapshotName,
-		CompressionMethod: deltaBackup.CompressionMethod,
-		Blocks:            []BlockMapping{},
+		Name:         deltaBackup.Name,
+		VolumeName:   deltaBackup.VolumeName,
+		SnapshotName: deltaBackup.SnapshotName,
+		Blocks:       []BlockMapping{},
 	}
 	var d, l int
 	for d, l = 0, 0; d < len(deltaBackup.Blocks) && l < len(lastBackup.Blocks); {
@@ -613,7 +375,7 @@ func mergeSnapshotMap(deltaBackup, lastBackup *Backup) *Backup {
 		LogFieldObject:     LogObjectBackup,
 		LogFieldBackup:     deltaBackup.Name,
 		LogFieldLastBackup: lastBackup.Name,
-	}).Debugf("Merge backup blocks")
+	}).Debugf("merge backup blocks")
 	if d == len(deltaBackup.Blocks) {
 		backup.Blocks = append(backup.Blocks, lastBackup.Blocks[l:]...)
 	} else {
@@ -630,7 +392,6 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 	volDevName := config.Filename
 	backupURL := config.BackupURL
-	concurrentLimit := config.ConcurrentLimit
 	deltaOps := config.DeltaOps
 	if deltaOps == nil {
 		return fmt.Errorf("missing DeltaRestoreOperations")
 	}
@@ -656,7 +417,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 		return err
 	}
 
-	vol, err := loadVolume(bsDriver, srcVolumeName)
+	vol, err := loadVolume(srcVolumeName, bsDriver)
 	if err != nil {
 		return generateError(logrus.Fields{
 			LogFieldVolume: srcVolumeName,
@@ -665,13 +426,13 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 	}
 
 	if vol.Size == 0 || vol.Size%DEFAULT_BLOCK_SIZE != 0 {
-		return fmt.Errorf("read invalid volume size %v", vol.Size)
+		return fmt.Errorf("Read invalid volume size %v", vol.Size)
 	}
 
 	if _, err := os.Stat(volDevName); err == nil {
 		logrus.Warnf("File %s for the restore exists, will remove and re-create it", volDevName)
-		if err := os.RemoveAll(volDevName); err != nil {
-			return errors.Wrapf(err, "failed to clean up the existing file %v before restore", volDevName)
+		if err := os.Remove(volDevName); err != nil {
+			return fmt.Errorf("failed to clean up the existing file %v before restore: %v", volDevName, err)
 		}
 	}
@@ -691,7 +452,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 		return err
 	}
 
-	backup, err := loadBackup(bsDriver, srcBackupName, srcVolumeName)
+	backup, err := loadBackup(srcBackupName, srcVolumeName, bsDriver)
 	if err != nil {
 		return err
 	}
@@ -714,10 +475,7 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 		defer volDev.Close()
 		defer lock.Unlock()
 
-		progress := &progress{
-			totalBlockCounts: int64(len(backup.Blocks)),
-		}
-
+		var progress int
 		// This pre-truncate is to ensure the XFS speculatively
 		// preallocates post-EOF blocks get reclaimed when volDev is
 		// closed.
@@ -726,27 +484,20 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 		if stat.Mode()&os.ModeType == 0 {
 			log.Debugf("Truncate %v to size %v", volDevName, vol.Size)
 			if err := volDev.Truncate(vol.Size); err != nil {
-				deltaOps.UpdateRestoreStatus(volDevName, progress.progress, err)
+				deltaOps.UpdateRestoreStatus(volDevName, progress, err)
 				return
 			}
 		}
 
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
-
-		blockChan, errChan := populateBlocksForFullRestore(bsDriver, backup)
-
-		errorChans := []<-chan error{errChan}
-		for i := 0; i < int(concurrentLimit); i++ {
-			errorChans = append(errorChans, restoreBlocks(ctx, bsDriver, config, srcVolumeName, blockChan, progress))
-		}
-
-		mergedErrChan := mergeErrorChannels(ctx, errorChans...)
-		err = <-mergedErrChan
-		if err != nil {
-			logrus.WithError(err).Errorf("Failed to delta restore volume %v backup %v", srcVolumeName, backup.Name)
-			deltaOps.UpdateRestoreStatus(volDevName, progress.progress, err)
-			return
+		blkCounts := len(backup.Blocks)
+		for i, block := range backup.Blocks {
+			log.Debugf("Restore for %v: block %v, %v/%v", volDevName, block.BlockChecksum, i+1, blkCounts)
+			if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, block); err != nil {
+				deltaOps.UpdateRestoreStatus(volDevName, progress, err)
+				return
+			}
+			progress = int((float64(i+1) / float64(blkCounts)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT)
+			deltaOps.UpdateRestoreStatus(volDevName, progress, err)
 		}
 
 		deltaOps.UpdateRestoreStatus(volDevName, PROGRESS_PERCENTAGE_BACKUP_TOTAL, nil)
@@ -755,22 +506,24 @@ func RestoreDeltaBlockBackup(config *DeltaRestoreConfig) error {
 	return nil
 }
 
-func restoreBlockToFile(bsDriver BackupStoreDriver, volumeName string, volDev *os.File, decompression string, blk BlockMapping) error {
+func restoreBlockToFile(volumeName string, volDev *os.File, bsDriver BackupStoreDriver, blk BlockMapping) error {
 	blkFile := getBlockFilePath(volumeName, blk.BlockChecksum)
 	rc, err := bsDriver.Read(blkFile)
 	if err != nil {
 		return err
 	}
 	defer rc.Close()
-	r, err := util.DecompressAndVerify(decompression, rc, blk.BlockChecksum)
+	r, err := util.DecompressAndVerify(rc, blk.BlockChecksum)
 	if err != nil {
 		return err
 	}
 	if _, err := volDev.Seek(blk.Offset, 0); err != nil {
 		return err
 	}
-	_, err = io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE)
-	return err
+	if _, err := io.CopyN(volDev, r, DEFAULT_BLOCK_SIZE); err != nil {
+		return err
+	}
+	return nil
 }
 
 func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
@@ -805,7 +558,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
 	}
 	defer lock.Unlock()
 
-	vol, err := loadVolume(bsDriver, srcVolumeName)
+	vol, err := loadVolume(srcVolumeName, bsDriver)
 	if err != nil {
 		return generateError(logrus.Fields{
 			LogFieldVolume: srcVolumeName,
@@ -826,7 +579,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
 	if _, err := os.Stat(volDevName); err == nil {
 		logrus.Warnf("File %s for the incremental restore exists, will remove and re-create it", volDevName)
 		if err := os.Remove(volDevName); err != nil {
-			return errors.Wrapf(err, "failed to clean up the existing file %v before incremental restore", volDevName)
+			return fmt.Errorf("failed to clean up the existing file %v before incremental restore: %v", volDevName, err)
 		}
 	}
@@ -846,11 +599,11 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
 		return err
 	}
 
-	lastBackup, err := loadBackup(bsDriver, lastBackupName, srcVolumeName)
+	lastBackup, err := loadBackup(lastBackupName, srcVolumeName, bsDriver)
 	if err != nil {
 		return err
 	}
-	backup, err := loadBackup(bsDriver, srcBackupName, srcVolumeName)
+	backup, err := loadBackup(srcBackupName, srcVolumeName, bsDriver)
 	if err != nil {
 		return err
 	}
@@ -885,7 +638,7 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
 		}
 	}
 
-		if err := performIncrementalRestore(bsDriver, config, srcVolumeName, volDevName, lastBackup, backup); err != nil {
+		if err := performIncrementalRestore(srcVolumeName, volDev, lastBackup, backup, bsDriver, config); err != nil {
 			deltaOps.UpdateRestoreStatus(volDevName, 0, err)
 			return
 		}
@@ -895,182 +648,63 @@ func RestoreDeltaBlockBackupIncrementally(config *DeltaRestoreConfig) error {
 	return nil
 }
 
-type Block struct {
-	offset            int64
-	blockChecksum     string
-	compressionMethod string
-	isZeroBlock       bool
-}
+func performIncrementalRestore(srcVolumeName string, volDev *os.File, lastBackup *Backup, backup *Backup,
+	bsDriver BackupStoreDriver, config *DeltaRestoreConfig) error {
+	var progress int
+	volDevName := config.Filename
+	deltaOps := config.DeltaOps
 
-func populateBlocksForIncrementalRestore(bsDriver BackupStoreDriver, lastBackup, backup *Backup) (<-chan *Block, <-chan error) {
-	blockChan := make(chan *Block, 10)
-	errChan := make(chan error, 1)
+	emptyBlock := make([]byte, DEFAULT_BLOCK_SIZE)
+	total := len(backup.Blocks) + len(lastBackup.Blocks)
 
-	go func() {
-		defer close(blockChan)
-		defer close(errChan)
-
-		for b, l := 0, 0; b < len(backup.Blocks) || l < len(lastBackup.Blocks); {
-			if b >= len(backup.Blocks) {
-				blockChan <- &Block{
-					offset:      lastBackup.Blocks[l].Offset,
-					isZeroBlock: true,
-				}
-				l++
-				continue
-			}
-			if l >= len(lastBackup.Blocks) {
-				blockChan <- &Block{
-					offset:            backup.Blocks[b].Offset,
-					blockChecksum:     backup.Blocks[b].BlockChecksum,
-					compressionMethod: backup.CompressionMethod,
-				}
-				b++
-				continue
-			}
-
-			bB := backup.Blocks[b]
-			lB := lastBackup.Blocks[l]
-			if bB.Offset == lB.Offset {
-				if bB.BlockChecksum != lB.BlockChecksum {
-					blockChan <- &Block{
-						offset:            bB.Offset,
-						blockChecksum:     bB.BlockChecksum,
-						compressionMethod: backup.CompressionMethod,
-					}
-				}
-				b++
-				l++
-			} else if bB.Offset < lB.Offset {
-				blockChan <- &Block{
-					offset:            bB.Offset,
-					blockChecksum:     bB.BlockChecksum,
-					compressionMethod: backup.CompressionMethod,
-				}
-				b++
-			} else {
-				blockChan <- &Block{
-					offset:      lB.Offset,
-					isZeroBlock: true,
-				}
-				l++
+	for b, l := 0, 0; b < len(backup.Blocks) || l < len(lastBackup.Blocks); {
+		if b >= len(backup.Blocks) {
+			if err := fillBlockToFile(&emptyBlock, volDev, lastBackup.Blocks[l].Offset); err != nil {
+				return err
 			}
+			l++
+			continue
 		}
-	}()
-
-	return blockChan, errChan
-}
-
-func populateBlocksForFullRestore(bsDriver BackupStoreDriver, backup *Backup) (<-chan *Block, <-chan error) {
-	blockChan := make(chan *Block, 10)
-	errChan := make(chan error, 1)
-
-	go func() {
-		defer close(blockChan)
-		defer close(errChan)
-
-		for _, block := range backup.Blocks {
-			blockChan <- &Block{
-				offset:            block.Offset,
-				blockChecksum:     block.BlockChecksum,
-				compressionMethod: backup.CompressionMethod,
+		if l >= len(lastBackup.Blocks) {
+			if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, backup.Blocks[b]); err != nil {
+				return err
 			}
+			b++
+			continue
 		}
-	}()
-
-	return blockChan, errChan
-}
-
-func restoreBlock(bsDriver BackupStoreDriver, config *DeltaRestoreConfig,
-	volumeName string, volDev *os.File, block *Block, progress *progress) error {
-	deltaOps := config.DeltaOps
-
-	defer func() {
-		progress.Lock()
-		defer progress.Unlock()
-
-		progress.processedBlockCounts++
-		progress.progress = getProgress(progress.totalBlockCounts, progress.processedBlockCounts)
-		deltaOps.UpdateRestoreStatus(volumeName, progress.progress, nil)
-	}()
-
-	if block.isZeroBlock {
-		return fillZeros(volDev, block.offset, DEFAULT_BLOCK_SIZE)
-	}
-
-	return restoreBlockToFile(bsDriver, volumeName, volDev, block.compressionMethod,
-		BlockMapping{
-			Offset:        block.offset,
-			BlockChecksum: block.blockChecksum,
-		})
-}
-
-func restoreBlocks(ctx context.Context, bsDriver BackupStoreDriver, config *DeltaRestoreConfig,
-	volumeName string, in <-chan *Block, progress *progress) <-chan error {
-	errChan := make(chan error, 1)
-	go func() {
-		defer close(errChan)
-
-		volDevName := config.Filename
-		volDev, err := os.OpenFile(volDevName, os.O_RDWR, 0666)
-		if err != nil {
-			errChan <- err
-			return
-		}
-		defer volDev.Close()
-
-		for {
-			select {
-			case <-ctx.Done():
-				return
-			case block, open := <-in:
-				if !open {
-					return
-				}
-
-				if err := restoreBlock(bsDriver, config, volumeName, volDev, block, progress); err != nil {
-					errChan <- err
-					return
+		bB := backup.Blocks[b]
+		lB := lastBackup.Blocks[l]
+		if bB.Offset == lB.Offset {
+			if bB.BlockChecksum != lB.BlockChecksum {
+				if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, bB); err != nil {
+					return err
 				}
 			}
+			b++
+			l++
+		} else if bB.Offset < lB.Offset {
+			if err := restoreBlockToFile(srcVolumeName, volDev, bsDriver, bB); err != nil {
+				return err
+			}
+			b++
+		} else {
+			if err := fillBlockToFile(&emptyBlock, volDev, lB.Offset); err != nil {
+				return err
+			}
+			l++
 		}
-	}()
-
-	return errChan
-}
-
-func performIncrementalRestore(bsDriver BackupStoreDriver, config *DeltaRestoreConfig,
-	srcVolumeName, volDevName string, lastBackup *Backup, backup *Backup) error {
-	var err error
-	concurrentLimit := config.ConcurrentLimit
-
-	progress := &progress{
-		totalBlockCounts: int64(len(backup.Blocks) + len(lastBackup.Blocks)),
+		progress = int((float64(b+l+2) / float64(total)) * PROGRESS_PERCENTAGE_BACKUP_SNAPSHOT)
+		deltaOps.UpdateRestoreStatus(volDevName, progress, nil)
 	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	blockChan, errChan := populateBlocksForIncrementalRestore(bsDriver, lastBackup, backup)
-
-	errorChans := []<-chan error{errChan}
-	for i := 0; i < int(concurrentLimit); i++ {
-		errorChans = append(errorChans, restoreBlocks(ctx, bsDriver, config, srcVolumeName, blockChan, progress))
-	}
-
-	mergedErrChan := mergeErrorChannels(ctx, errorChans...)
-	err = <-mergedErrChan
-
-	if err != nil {
-		logrus.WithError(err).Errorf("Failed to incrementally restore volume %v backup %v", srcVolumeName, backup.Name)
-	}
-
-	return err
+	return nil
 }
 
-func fillZeros(volDev *os.File, offset, length int64) error {
-	return syscall.Fallocate(int(volDev.Fd()), 0, offset, length)
+func fillBlockToFile(block *[]byte, volDev *os.File, offset int64) error {
+	if _, err := volDev.WriteAt(*block, offset); err != nil {
+		return err
+	}
+	return nil
 }
 
 func DeleteBackupVolume(volumeName string, destURL string) error {
@@ -1087,14 +721,18 @@ func DeleteBackupVolume(volumeName string, destURL string) error {
 		return err
 	}
 	defer lock.Unlock()
-	return removeVolume(volumeName, bsDriver)
+	if err := removeVolume(volumeName, bsDriver); err != nil {
+		return err
+	}
+	return nil
 }
 
 func checkBlockReferenceCount(blockInfos map[string]*BlockInfo, backup *Backup, volumeName string, driver BackupStoreDriver) {
 	for _, block := range backup.Blocks {
 		info, known := blockInfos[block.BlockChecksum]
 		if !known {
-			log.Errorf("Backup %v refers to unknown block %v", backup.Name, block.BlockChecksum)
+			log.Errorf("backup %v refers to unknown block %v",
+				backup.Name, block.BlockChecksum)
 			info = &BlockInfo{checksum: block.BlockChecksum}
 			blockInfos[block.BlockChecksum] = info
 		}
@@ -1112,12 +750,12 @@ func getLatestBackup(backup *Backup, lastBackup *Backup) error {
 
 	backupTime, err := time.Parse(time.RFC3339, backup.SnapshotCreatedAt)
 	if err != nil {
-		return errors.Wrapf(err, "cannot parse backup %v time %v", backup.Name, backup.SnapshotCreatedAt)
+		return fmt.Errorf("Cannot parse backup %v time %v due to %v", backup.Name, backup.SnapshotCreatedAt, err)
 	}
 
 	lastBackupTime, err := time.Parse(time.RFC3339, lastBackup.SnapshotCreatedAt)
 	if err != nil {
-		return errors.Wrapf(err, "cannot parse last backup %v time %v", lastBackup.Name, lastBackup.SnapshotCreatedAt)
+		return fmt.Errorf("Cannot parse last backup %v time %v due to %v", lastBackup.Name, lastBackup.SnapshotCreatedAt, err)
 	}
 
 	if backupTime.After(lastBackupTime) {
@@ -1152,9 +790,9 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 	defer lock.Unlock()
 
 	// If we fail to load the backup we still want to proceed with the deletion of the backup file
-	backupToBeDeleted, err := loadBackup(bsDriver, backupName, volumeName)
+	backupToBeDeleted, err := loadBackup(backupName, volumeName, bsDriver)
 	if err != nil {
-		log.WithError(err).Warn("Failed to load to be deleted backup")
+		log.WithError(err).Warn("failed to load to be deleted backup")
 		backupToBeDeleted = &Backup{
 			Name:       backupName,
 			VolumeName: volumeName,
@@ -1167,9 +805,9 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 	}
 	log.Info("Removed backup for volume")
 
-	v, err := loadVolume(bsDriver, volumeName)
+	v, err := loadVolume(volumeName, bsDriver)
 	if err != nil {
-		return errors.Wrap(err, "cannot find volume in backupstore")
+		return fmt.Errorf("Cannot find volume in backupstore due to: %v", err)
 	}
 	updateLastBackup := false
 	if backupToBeDeleted.Name == v.LastBackupName {
@@ -1180,14 +818,14 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 	log.Debug("GC started")
 
 	deleteBlocks := true
-	backupNames, err := getBackupNamesForVolume(bsDriver, volumeName)
+	backupNames, err := getBackupNamesForVolume(volumeName, bsDriver)
 	if err != nil {
-		log.WithError(err).Warn("Failed to load backup names, skip block deletion")
+		log.WithError(err).Warn("failed to load backup names, skip block deletion")
 		deleteBlocks = false
 	}
 
 	blockInfos := make(map[string]*BlockInfo)
-	blockNames, err := getBlockNamesForVolume(bsDriver, volumeName)
+	blockNames, err := getBlockNamesForVolume(volumeName, bsDriver)
 	if err != nil {
 		return err
 	}
@@ -1202,9 +840,9 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 	lastBackup := &Backup{}
 	for _, name := range backupNames {
 		log := log.WithField("backup", name)
-		backup, err := loadBackup(bsDriver, name, volumeName)
+		backup, err := loadBackup(name, volumeName, bsDriver)
 		if err != nil {
-			log.WithError(err).Warn("Failed to load backup, skip block deletion")
+			log.WithError(err).Warn("failed to load backup, skip block deletion")
 			deleteBlocks = false
 			break
 		}
@@ -1223,7 +861,7 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 		if updateLastBackup {
 			err := getLatestBackup(backup, lastBackup)
 			if err != nil {
-				log.WithError(err).Warn("Failed to find last backup, skip block deletion")
+				log.WithError(err).Warn("failed to find last backup, skip block deletion")
 				deleteBlocks = false
 				break
 			}
@@ -1234,14 +872,14 @@ func DeleteDeltaBlockBackup(backupURL string) error {
 			v.LastBackupName = lastBackup.Name
 			v.LastBackupAt = lastBackup.SnapshotCreatedAt
 		}
-		if err := saveVolume(bsDriver, v); err != nil {
+		if err := saveVolume(v, bsDriver); err != nil {
 			return err
 		}
 	}
 
 	// check if there have been new backups created while we where processing
 	prevBackupNames := backupNames
-	backupNames, err = getBackupNamesForVolume(bsDriver, volumeName)
+	backupNames, err = getBackupNamesForVolume(volumeName, bsDriver)
 	if err != nil || !util.UnorderedEqual(prevBackupNames, backupNames) {
 		log.Info("Found new backups for volume, skip block deletion")
 		deleteBlocks = false
 	}
 
 	// only delete the blocks if it is safe to do so
 	if deleteBlocks {
-		if err := cleanupBlocks(bsDriver, blockInfos, volumeName); err != nil {
+		if err := cleanupBlocks(blockInfos, volumeName, bsDriver); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func cleanupBlocks(driver BackupStoreDriver, blockMap map[string]*BlockInfo, volume string) error {
+func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupStoreDriver) error {
 	var deletionFailures []string
 	activeBlockCount := int64(0)
 	deletedBlockCount := int64(0)
@@ -1266,7 +904,7 @@ func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupS
 			deletionFailures = append(deletionFailures, blk.checksum)
 			continue
 		}
-		log.Debugf("Deleted block %v for volume %v", blk.checksum, volume)
+		log.Debugf("deleted block %v for volume %v", blk.checksum, volume)
 		deletedBlockCount++
 	} else if isBlockReferenced(blk) && isBlockPresent(blk) {
 		activeBlockCount++
@@ -1281,17 +919,20 @@ func cleanupBlocks(blockMap map[string]*BlockInfo, volume string, driver BackupS
 	log.Debugf("Removed %v unused blocks for volume %v", deletedBlockCount, volume)
 	log.Debug("GC completed")
 
-	v, err := loadVolume(driver, volume)
+	v, err := loadVolume(volume, driver)
 	if err != nil {
 		return err
 	}
 
 	// update the block count to what we actually have on disk that is in use
 	v.BlockCount = activeBlockCount
-	return saveVolume(driver, v)
+	if err := saveVolume(v, driver); err != nil {
+		return err
+	}
+	return nil
 }
 
-func getBlockNamesForVolume(driver BackupStoreDriver, volumeName string) ([]string, error) {
+func getBlockNamesForVolume(volumeName string, driver BackupStoreDriver) ([]string, error) {
 	names := []string{}
 	blockPathBase := getBlockPath(volumeName)
 	lv1Dirs, err := driver.List(blockPathBase)
diff --git a/vendor/github.com/longhorn/backupstore/driver.go b/vendor/github.com/longhorn/backupstore/driver.go
index 443a13979..d3079eaf7 100644
--- a/vendor/github.com/longhorn/backupstore/driver.go
+++ b/vendor/github.com/longhorn/backupstore/driver.go
@@ -35,10 +35,6 @@ var (
 	log = logrus.WithFields(logrus.Fields{"pkg": "backupstore"})
 )
 
-func GetLog() logrus.FieldLogger {
-	return log
-}
-
 func generateError(fields logrus.Fields, format string, v ...interface{}) error {
 	return ErrorWithFields("backupstore", fields, format, v...)
 }
@@ -65,14 +61,14 @@ func unregisterDriver(kind string) error {
 
 func GetBackupStoreDriver(destURL string) (BackupStoreDriver, error) {
 	if destURL == "" {
-		return nil, fmt.Errorf("destination URL hasn't been specified")
+		return nil, fmt.Errorf("Destination URL hasn't been specified")
 	}
 	u, err := url.Parse(destURL)
 	if err != nil {
 		return nil, err
 	}
 	if _, exists := initializers[u.Scheme]; !exists {
-		return nil, fmt.Errorf("driver %v is not supported", u.Scheme)
+		return nil, fmt.Errorf("Driver %v is not supported!", u.Scheme)
 	}
 	return initializers[u.Scheme](destURL)
 }
diff --git a/vendor/github.com/longhorn/backupstore/fsops/fsops.go b/vendor/github.com/longhorn/backupstore/fsops/fsops.go
index 157ff9db1..fafe6d540 100644
--- a/vendor/github.com/longhorn/backupstore/fsops/fsops.go
+++ b/vendor/github.com/longhorn/backupstore/fsops/fsops.go
@@ -29,7 +29,10 @@ func NewFileSystemOperator(ops FileSystemOps) *FileSystemOperator {
 }
 
 func (f *FileSystemOperator) preparePath(file string) error {
-	return os.MkdirAll(filepath.Dir(f.LocalPath(file)), os.ModeDir|0700)
+	if err := os.MkdirAll(filepath.Dir(f.LocalPath(file)), os.ModeDir|0700); err != nil {
+		return err
+	}
+	return nil
 }
 
 func (f *FileSystemOperator) FileSize(filePath string) int64 {
@@ -137,10 +140,16 @@ func (f *FileSystemOperator) Upload(src, dst string) error {
 		return err
 	}
 	_, err = util.Execute("mv", []string{f.LocalPath(tmpDst), f.LocalPath(dst)})
-	return err
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 func (f *FileSystemOperator) Download(src, dst string) error {
 	_, err := util.Execute("cp", []string{f.LocalPath(src), dst})
-	return err
+	if err != nil {
+		return err
+	}
+	return nil
 }
diff --git a/vendor/github.com/longhorn/backupstore/go.mod b/vendor/github.com/longhorn/backupstore/go.mod
index f0021811e..79f8d8cc9 100644
--- a/vendor/github.com/longhorn/backupstore/go.mod
+++ b/vendor/github.com/longhorn/backupstore/go.mod
@@ -6,13 +6,11 @@ require (
 	github.com/aws/aws-sdk-go v1.34.2
 	github.com/google/uuid v1.3.0
 	github.com/honestbee/jobq v1.0.2
-	github.com/pierrec/lz4/v4 v4.1.17
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.3.0
 	github.com/spf13/afero v1.5.1
 	github.com/stretchr/testify v1.7.0
 	github.com/urfave/cli v1.14.0
-	golang.org/x/net v0.0.0-20200202094626-16171245cfb2
 	gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
diff --git a/vendor/github.com/longhorn/backupstore/go.sum b/vendor/github.com/longhorn/backupstore/go.sum
index 57beb2cb0..27e7cede9 100644
--- a/vendor/github.com/longhorn/backupstore/go.sum
+++ b/vendor/github.com/longhorn/backupstore/go.sum
@@ -13,8 +13,6 @@ github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeY
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
-github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/vendor/github.com/longhorn/backupstore/http/client.go b/vendor/github.com/longhorn/backupstore/http/client.go
index 85fcdd5f0..f2300e695 100644
--- a/vendor/github.com/longhorn/backupstore/http/client.go
+++ b/vendor/github.com/longhorn/backupstore/http/client.go
@@ -5,9 +5,6 @@ import (
 	"crypto/x509"
 	"fmt"
 	"net/http"
-	"net/url"
-
-	"golang.org/x/net/http/httpproxy"
 )
 
 func getSystemCerts() *x509.CertPool {
@@ -46,9 +43,6 @@ func GetClient(insecure bool, customCerts []byte) (*http.Client, error) {
 		InsecureSkipVerify: insecure,
 		RootCAs:            certs,
 	}
-	customTransport.Proxy = func(request *http.Request) (*url.URL, error) {
-		return httpproxy.FromEnvironment().ProxyFunc()(request.URL)
-	}
 	client := &http.Client{Transport: customTransport}
 	return client, nil
 }
diff --git a/vendor/github.com/longhorn/backupstore/inspect.go b/vendor/github.com/longhorn/backupstore/inspect.go
index 7c196fbad..080b691f5 100644
--- a/vendor/github.com/longhorn/backupstore/inspect.go
+++ b/vendor/github.com/longhorn/backupstore/inspect.go
@@ -19,7 +19,7 @@ func InspectVolume(volumeURL string) (*VolumeInfo, error) {
 		return nil, err
 	}
 
-	volume, err := loadVolume(driver, volumeName)
+	volume, err := loadVolume(volumeName, driver)
 	if err != nil {
 		return nil, err
 	}
@@ -38,12 +38,12 @@ func InspectBackup(backupURL string) (*BackupInfo, error) {
 		return nil, err
 	}
 
-	volume, err := loadVolume(driver, volumeName)
+	volume, err := loadVolume(volumeName, driver)
 	if err != nil {
 		return nil, err
 	}
 
-	backup, err := loadBackup(driver, backupName, volumeName)
+	backup, err := loadBackup(backupName, volumeName, driver)
 	if err != nil {
 		log.WithFields(logrus.Fields{
 			LogFieldReason: LogReasonFallback,
@@ -79,15 +79,14 @@ func fillVolumeInfo(volume *Volume) *VolumeInfo {
 
 func fillBackupInfo(backup *Backup, destURL string) *BackupInfo {
 	return &BackupInfo{
-		Name:              backup.Name,
-		URL:               EncodeBackupURL(backup.Name, backup.VolumeName, destURL),
-		SnapshotName:      backup.SnapshotName,
-		SnapshotCreated:   backup.SnapshotCreatedAt,
-		Created:           backup.CreatedTime,
-		Size:              backup.Size,
-		Labels:            backup.Labels,
-		IsIncremental:     backup.IsIncremental,
-		CompressionMethod: backup.CompressionMethod,
+		Name:            backup.Name,
+		URL:             EncodeBackupURL(backup.Name, backup.VolumeName, destURL),
+		SnapshotName:    backup.SnapshotName,
+		SnapshotCreated: backup.SnapshotCreatedAt,
+		Created:         backup.CreatedTime,
+		Size:            backup.Size,
+		Labels:          backup.Labels,
+		IsIncremental:   backup.IsIncremental,
 	}
 }
diff --git a/vendor/github.com/longhorn/backupstore/list.go b/vendor/github.com/longhorn/backupstore/list.go
index 0d2b20737..dde1babf2 100644
--- a/vendor/github.com/longhorn/backupstore/list.go
+++ b/vendor/github.com/longhorn/backupstore/list.go
@@ -32,15 +32,14 @@ type VolumeInfo struct {
 }
 
 type BackupInfo struct {
-	Name              string
-	URL               string
-	SnapshotName      string
-	SnapshotCreated   string
-	Created           string
-	Size              int64 `json:",string"`
-	Labels            map[string]string
-	IsIncremental     bool
-	CompressionMethod string `json:",omitempty"`
+	Name            string
+	URL             string
+	SnapshotName    string
+	SnapshotCreated string
+	Created         string
+	Size            int64 `json:",string"`
+	Labels          map[string]string
+	IsIncremental   bool
 
 	VolumeName string `json:",omitempty"`
 	VolumeSize int64  `json:",string,omitempty"`
@@ -50,17 +49,17 @@ type BackupInfo struct {
 	Messages map[MessageType]string
 }
 
-func addListVolume(driver BackupStoreDriver, volumeName string, volumeOnly bool) (*VolumeInfo, error) {
+func addListVolume(volumeName string, driver BackupStoreDriver, volumeOnly bool) (*VolumeInfo, error) {
 	if volumeName == "" {
-		return nil, fmt.Errorf("invalid empty volume Name")
+		return nil, fmt.Errorf("Invalid empty volume Name")
 	}
 	if !util.ValidateName(volumeName) {
-		return nil, fmt.Errorf("invalid volume name %v", volumeName)
+		return nil, fmt.Errorf("Invalid volume name %v", volumeName)
 	}
 
 	volumeInfo := &VolumeInfo{Messages: make(map[MessageType]string)}
-	if !volumeExists(driver, volumeName) {
+	if !volumeExists(volumeName, driver) {
 		// If the backup volume folder exist but volume.cfg not exist
 		// save the error in Messages field
 		volumeInfo.Messages[MessageTypeError] = fmt.Sprintf("cannot find %v in backupstore", getVolumeFilePath(volumeName))
@@ -72,7 +71,7 @@ func addListVolume(volumeName string, driver BackupStoreDriver, volumeOnly bool)
 	}
 
 	// try to find all backups for this volume
-	backupNames, err := getBackupNamesForVolume(driver, volumeName)
+	backupNames, err := getBackupNamesForVolume(volumeName, driver)
 	if err != nil {
 		volumeInfo.Messages[MessageTypeError] = err.Error()
 		return volumeInfo, nil
@@ -109,7 +108,7 @@ func List(volumeName, destURL string, volumeOnly bool) (map[string]*VolumeInfo,
 
 	var errs []string
 	for _, volumeName := range volumeNames {
-		volumeInfo, err := addListVolume(driver, volumeName, volumeOnly)
+		volumeInfo, err := addListVolume(volumeName, driver, volumeOnly)
 		if err != nil {
 			errs = append(errs, err.Error())
 			continue
diff --git a/vendor/github.com/longhorn/backupstore/lock.go b/vendor/github.com/longhorn/backupstore/lock.go
index 4dcf03629..e5a708856 100644
--- a/vendor/github.com/longhorn/backupstore/lock.go
+++ b/vendor/github.com/longhorn/backupstore/lock.go
@@ -62,7 +62,7 @@ func (lock *FileLock) canAcquire() bool {
 	canAcquire := true
 	locks := getLocksForVolume(lock.volume, lock.driver)
 	file := getLockFilePath(lock.volume, lock.Name)
-	log.WithField("lock", lock).Debugf("Trying to acquire lock %v", file)
+	log.WithField("lock", lock).Debugf("trying to acquire lock %v", file)
 	log.Debugf("backupstore volume %v contains locks %v", lock.volume, locks)
 
 	for _, serverLock := range locks {
@@ -164,7 +164,7 @@ func (lock *FileLock) Unlock() error {
 func loadLock(volumeName string, name string, driver BackupStoreDriver) (*FileLock, error) {
 	lock := &FileLock{}
 	file := getLockFilePath(volumeName, name)
-	if err := LoadConfigInBackupStore(driver, file, lock); err != nil {
+	if err := loadConfigInBackupStore(file, driver, lock); err != nil {
 		return nil, err
 	}
 	lock.serverTime = driver.FileTime(file)
@@ -183,7 +183,7 @@ func removeLock(lock *FileLock) error {
 func saveLock(lock *FileLock) error {
 	file := getLockFilePath(lock.volume, lock.Name)
-	if err := SaveConfigInBackupStore(lock.driver, file, lock); err != nil {
+	if err := saveConfigInBackupStore(file, lock.driver, lock); err != nil {
 		return err
 	}
 	lock.serverTime = lock.driver.FileTime(file)
@@ -226,7 +226,7 @@ func getLocksForVolume(volumeName string, driver BackupStoreDriver) []*FileLock
 		lock, err := loadLock(volumeName, name, driver)
 		if err != nil {
 			file := getLockFilePath(volumeName, name)
-			log.WithError(err).Warnf("Failed to load lock %v on backupstore", file)
+			log.Warnf("failed to load lock %v on backupstore reason %v", file, err)
 			continue
 		}
 		locks = append(locks, lock)
diff --git a/vendor/github.com/longhorn/backupstore/logging/logging.go b/vendor/github.com/longhorn/backupstore/logging/logging.go
index c040e98d9..e57d9e96e 100644
--- a/vendor/github.com/longhorn/backupstore/logging/logging.go
+++ b/vendor/github.com/longhorn/backupstore/logging/logging.go
@@ -18,7 +18,6 @@ const (
 	LogFieldLastSnapshot = "last_snapshot"
 	LogEventBackupURL    = "backup_url"
 	LogFieldDestURL      = "dest_url"
-	LogFieldSourceURL    = "source_url"
 	LogFieldKind         = "kind"
 	LogFieldFilepath     = "filepath"
diff --git a/vendor/github.com/longhorn/backupstore/nfs/nfs.go b/vendor/github.com/longhorn/backupstore/nfs/nfs.go
index ef39d30a4..15af7aea1 100644
--- a/vendor/github.com/longhorn/backupstore/nfs/nfs.go
+++ b/vendor/github.com/longhorn/backupstore/nfs/nfs.go
@@ -7,18 +7,16 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-
 	"github.com/longhorn/backupstore"
 	"github.com/longhorn/backupstore/fsops"
 	"github.com/longhorn/backupstore/util"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 var (
-	log            = logrus.WithFields(logrus.Fields{"pkg": "nfs"})
-	MinorVersions  = []string{"4.2", "4.1", "4.0"}
-	defaultTimeout = 5 * time.Second
+	log           = logrus.WithFields(logrus.Fields{"pkg": "nfs"})
+	MinorVersions = []string{"4.2", "4.1", "4.0"}
 )
 
 type BackupStoreDriver struct {
@@ -61,24 +59,24 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) {
 		return nil, fmt.Errorf("NFS path must follow: nfs://server:/path/ format")
 	}
 	if u.Path == "" {
-		return nil, fmt.Errorf("cannot find nfs path")
+		return nil, fmt.Errorf("Cannot find nfs path")
 	}
 	b.serverPath = u.Host + u.Path
 	b.mountDir = filepath.Join(MountDir, strings.TrimRight(strings.Replace(u.Host, ".", "_", -1), ":"), u.Path)
 
-	if _, err = util.ExecuteWithCustomTimeout("mkdir", []string{"-m", "700", "-p", b.mountDir}, defaultTimeout); err != nil {
-		return nil, errors.Wrapf(err, "cannot create mount directory %v for NFS server", b.mountDir)
+	if _, err = util.ExecuteWithCustomTimeout("mkdir", []string{"-m", "700", "-p", b.mountDir}, 3*time.Second); err != nil {
+		return nil, fmt.Errorf("Cannot create mount directory %v for NFS server: %v", b.mountDir, err)
 	}
 
 	if err := b.mount(); err != nil {
-		return nil, errors.Wrapf(err, "cannot mount nfs %v", b.serverPath)
+		return nil, fmt.Errorf("Cannot mount nfs %v: %v", b.serverPath, err)
 	}
 
 	if _, err := b.List(""); err != nil {
 		return nil, fmt.Errorf("NFS path %v doesn't exist or is not a directory", b.serverPath)
 	}
 
 	b.destURL = KIND + "://" + b.serverPath
-	log.Infof("Loaded driver for %v", b.destURL)
+	log.Debugf("Loaded driver for %v", b.destURL)
 	return b, nil
 }
@@ -90,8 +88,8 @@ func (b *BackupStoreDriver) mount() (err error) {
 	retErr := errors.New("Cannot mount using NFSv4")
 
 	for _, version := range MinorVersions {
-		log.Debugf("Attempting mount for nfs path %v with nfsvers %v", b.serverPath, version)
-		_, err = util.ExecuteWithCustomTimeout("mount", []string{"-t", "nfs4", "-o", fmt.Sprintf("nfsvers=%v", version), "-o", "actimeo=1", b.serverPath, b.mountDir}, defaultTimeout)
+		log.Debugf("attempting mount for nfs path %v with nfsvers %v", b.serverPath, version)
+		_, err = util.Execute("mount", []string{"-t", "nfs4", "-o", fmt.Sprintf("nfsvers=%v", version), "-o", "actimeo=1", b.serverPath, b.mountDir})
 		if err == nil {
 			return nil
 		}
diff --git a/vendor/github.com/longhorn/backupstore/s3/s3.go b/vendor/github.com/longhorn/backupstore/s3/s3.go
index a57648f7a..aef885092 100644
---
a/vendor/github.com/longhorn/backupstore/s3/s3.go +++ b/vendor/github.com/longhorn/backupstore/s3/s3.go @@ -10,10 +10,9 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/sirupsen/logrus" - "github.com/longhorn/backupstore" "github.com/longhorn/backupstore/http" + "github.com/sirupsen/logrus" ) var ( @@ -61,12 +60,13 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { } // add custom ca to http client that is used by s3 service - customCerts := getCustomCerts() - client, err := http.GetClientWithCustomCerts(customCerts) - if err != nil { - return nil, err + if customCerts := getCustomCerts(); customCerts != nil { + client, err := http.GetClientWithCustomCerts(customCerts) + if err != nil { + return nil, err + } + b.service.Client = client } - b.service.Client = client //Leading '/' can cause mystery problems for s3 b.path = strings.TrimLeft(b.path, "/") @@ -82,7 +82,7 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { } b.destURL += "/" + b.path - log.Infof("Loaded driver for %v", b.destURL) + log.Debugf("Loaded driver for %v", b.destURL) return b, nil } @@ -114,7 +114,7 @@ func (s *BackupStoreDriver) List(listPath string) ([]string, error) { path := s.updatePath(listPath) + "/" contents, prefixes, err := s.service.ListObjects(path, "/") if err != nil { - log.WithError(err).Error("Failed to list s3") + log.Error("Fail to list s3: ", err) return result, err } @@ -195,17 +195,11 @@ func (s *BackupStoreDriver) Download(src, dst string) error { if _, err := os.Stat(dst); err != nil { os.Remove(dst) } - - if err := os.MkdirAll(filepath.Dir(dst), os.ModeDir|0700); err != nil { - return err - } - f, err := os.Create(dst) if err != nil { return err } defer f.Close() - path := s.updatePath(src) rc, err := s.service.GetObject(path) if err != nil { @@ -214,5 +208,8 @@ func (s *BackupStoreDriver) Download(src, dst string) error { defer rc.Close() _, err = io.Copy(f, rc) - return err + if err != nil { + return err + } + return nil } diff --git a/vendor/github.com/longhorn/backupstore/s3/s3_service.go b/vendor/github.com/longhorn/backupstore/s3/s3_service.go index 6cee12e7d..6c516dcbd 100644 --- a/vendor/github.com/longhorn/backupstore/s3/s3_service.go +++ b/vendor/github.com/longhorn/backupstore/s3/s3_service.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - "github.com/pkg/errors" ) type Service struct { @@ -164,12 +163,12 @@ func (s *Service) DeleteObjects(key string) error { objects, _, err := s.ListObjects(key, "") if err != nil { - return errors.Wrapf(err, "failed to list objects with prefix %v before removing them", key) + return fmt.Errorf("failed to list objects with prefix %v before removing them error: %v", key, err) } svc, err := s.New() if err != nil { - return errors.Wrap(err, "failed to get a new s3 client instance before removing objects") + return fmt.Errorf("failed to get a new s3 client instance before removing objects: %v", err) } defer s.Close() @@ -181,7 +180,7 @@ func (s *Service) DeleteObjects(key string) error { }) if err != nil { - log.Errorf("Failed to delete object: %v response: %v error: %v", + log.Errorf("failed to delete object: %v response: %v error: %v", aws.StringValue(object.Key), resp.String(), parseAwsError(err)) deletionFailures = append(deletionFailures, aws.StringValue(object.Key)) } diff --git a/vendor/github.com/longhorn/backupstore/singlefile.go b/vendor/github.com/longhorn/backupstore/singlefile.go index 
82b64ee5b..24af97cb9 100644 --- a/vendor/github.com/longhorn/backupstore/singlefile.go +++ b/vendor/github.com/longhorn/backupstore/singlefile.go @@ -1,13 +1,13 @@ package backupstore import ( + "fmt" "path/filepath" - "github.com/pkg/errors" + "github.com/longhorn/backupstore/util" "github.com/sirupsen/logrus" . "github.com/longhorn/backupstore/logging" - "github.com/longhorn/backupstore/util" ) const ( @@ -29,11 +29,11 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR return "", err } - if err := addVolume(driver, volume); err != nil { + if err := addVolume(volume, driver); err != nil { return "", err } - volume, err = loadVolume(driver, volume.Name) + volume, err = loadVolume(volume.Name, driver) if err != nil { return "", err } @@ -51,7 +51,6 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR VolumeName: volume.Name, SnapshotName: snapshot.Name, SnapshotCreatedAt: snapshot.CreatedTime, - CompressionMethod: volume.CompressionMethod, } backup.SingleFile.FilePath = getSingleFileBackupFilePath(backup) @@ -60,7 +59,7 @@ func CreateSingleFileBackup(volume *Volume, snapshot *Snapshot, filePath, destUR } backup.CreatedTime = util.Now() - if err := saveBackup(driver, backup); err != nil { + if err := saveBackup(backup, driver); err != nil { return "", err } @@ -85,14 +84,14 @@ func RestoreSingleFileBackup(backupURL, path string) (string, error) { return "", err } - if _, err := loadVolume(driver, srcVolumeName); err != nil { + if _, err := loadVolume(srcVolumeName, driver); err != nil { return "", generateError(logrus.Fields{ LogFieldVolume: srcVolumeName, LogEventBackupURL: backupURL, }, "Volume doesn't exist in backupstore: %v", err) } - backup, err := loadBackup(driver, srcBackupName, srcVolumeName) + backup, err := loadBackup(srcBackupName, srcVolumeName, driver) if err != nil { return "", err } @@ -116,12 +115,12 @@ func DeleteSingleFileBackup(backupURL string) error { return err } - _, err = loadVolume(driver, volumeName) + _, err = loadVolume(volumeName, driver) if err != nil { - return errors.Wrapf(err, "cannot find volume %v in backupstore", volumeName) + return fmt.Errorf("Cannot find volume %v in backupstore due to: %v", volumeName, err) } - backup, err := loadBackup(driver, backupName, volumeName) + backup, err := loadBackup(backupName, volumeName, driver) if err != nil { return err } @@ -130,5 +129,9 @@ func DeleteSingleFileBackup(backupURL string) error { return err } - return removeBackup(backup, driver) + if err := removeBackup(backup, driver); err != nil { + return err + } + + return nil } diff --git a/vendor/github.com/longhorn/backupstore/util/util.go b/vendor/github.com/longhorn/backupstore/util/util.go index b6ed34a78..be23b864b 100644 --- a/vendor/github.com/longhorn/backupstore/util/util.go +++ b/vendor/github.com/longhorn/backupstore/util/util.go @@ -4,20 +4,17 @@ import ( "bytes" "compress/gzip" "context" - "crypto/sha256" "crypto/sha512" "encoding/hex" "fmt" "io" "io/ioutil" - "os" "os/exec" "regexp" "strings" "time" "github.com/google/uuid" - lz4 "github.com/pierrec/lz4/v4" "github.com/sirupsen/logrus" ) @@ -29,14 +26,6 @@ var ( cmdTimeout = time.Minute // one minute by default ) -// NopCloser wraps an io.Witer as io.WriteCloser -// with noop Close -type NopCloser struct { - io.Writer -} - -func (NopCloser) Close() error { return nil } - func GenerateName(prefix string) string { suffix := strings.Replace(NewUUID(), "-", "", -1) return prefix + "-" + suffix[:16] @@ -52,43 +41,19 @@ func GetChecksum(data 
[]byte) string { return checksum } -func GetFileChecksum(filePath string) (string, error) { - f, err := os.Open(filePath) - if err != nil { - return "", err - } - defer f.Close() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return "", err - } - - return hex.EncodeToString(h.Sum(nil)), nil -} - -func CompressData(method string, data []byte) (io.ReadSeeker, error) { - if method == "none" { - return bytes.NewReader(data), nil - } - - var buffer bytes.Buffer - - w, err := newCompressionWriter(method, &buffer) - if err != nil { - return nil, err - } - +func CompressData(data []byte) (io.ReadSeeker, error) { + var b bytes.Buffer + w := gzip.NewWriter(&b) if _, err := w.Write(data); err != nil { w.Close() return nil, err } w.Close() - return bytes.NewReader(buffer.Bytes()), nil + return bytes.NewReader(b.Bytes()), nil } -func DecompressAndVerify(method string, src io.Reader, checksum string) (io.Reader, error) { - r, err := newDecompressionReader(method, src) +func DecompressAndVerify(src io.Reader, checksum string) (io.Reader, error) { + r, err := gzip.NewReader(src) if err != nil { return nil, err } @@ -103,30 +68,6 @@ func DecompressAndVerify(method string, src io.Reader, checksum string) (io.Read return bytes.NewReader(block), nil } -func newCompressionWriter(method string, buffer io.Writer) (io.WriteCloser, error) { - switch method { - case "gzip": - return gzip.NewWriter(buffer), nil - case "lz4": - return lz4.NewWriter(buffer), nil - default: - return nil, fmt.Errorf("unsupported compression method: %v", method) - } -} - -func newDecompressionReader(method string, r io.Reader) (io.ReadCloser, error) { - switch method { - case "none": - return ioutil.NopCloser(r), nil - case "gzip": - return gzip.NewReader(r) - case "lz4": - return ioutil.NopCloser(lz4.NewReader(r)), nil - default: - return nil, fmt.Errorf("unsupported decompression method: %v", method) - } -} - func Now() string { return time.Now().UTC().Format(time.RFC3339) } @@ -213,11 +154,11 @@ func execute(ctx context.Context, binary string, args []string) (string, error) case <-done: break case <-ctx.Done(): - return "", fmt.Errorf("timeout executing: %v %v, output %v, error %v", binary, args, string(output), err) + return "", fmt.Errorf("Timeout executing: %v %v, output %v, error %v", binary, args, string(output), err) } if err != nil { - return "", fmt.Errorf("failed to execute: %v %v, output %v, error %v", binary, args, string(output), err) + return "", fmt.Errorf("Failed to execute: %v %v, output %v, error %v", binary, args, string(output), err) } return string(output), nil diff --git a/vendor/github.com/longhorn/backupstore/vfs/vfs.go b/vendor/github.com/longhorn/backupstore/vfs/vfs.go index ab1758ab2..74d207ddd 100644 --- a/vendor/github.com/longhorn/backupstore/vfs/vfs.go +++ b/vendor/github.com/longhorn/backupstore/vfs/vfs.go @@ -53,14 +53,14 @@ func initFunc(destURL string) (backupstore.BackupStoreDriver, error) { b.path = u.Path if b.path == "" { - return nil, fmt.Errorf("cannot find vfs path") + return nil, fmt.Errorf("Cannot find vfs path") } if _, err := b.List(""); err != nil { return nil, fmt.Errorf("VFS path %v doesn't exist or is not a directory", b.path) } b.destURL = KIND + "://" + b.path - log.Infof("Loaded driver for %v", b.destURL) + log.Debugf("Loaded driver for %v", b.destURL) return b, nil } diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/backingfile/backingfile.go b/vendor/github.com/longhorn/longhorn-engine/pkg/backingfile/backingfile.go index eb939c152..ab4e7eff2 100644 
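
Note: the util.go hunk above reverts CompressData/DecompressAndVerify to the gzip-only code path, dropping the per-backup compression-method selection (and with it the lz4 dependency). A self-contained sketch of that gzip round trip follows, with plain SHA-512 standing in for the package's GetChecksum helper; treat it as an approximation of the vendored behavior, not the exact implementation.

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

// getChecksum stands in for util.GetChecksum (assumed SHA-512 over the block).
func getChecksum(data []byte) string {
	sum := sha512.Sum512(data)
	return hex.EncodeToString(sum[:])
}

// compressData mirrors the reverted CompressData: gzip only.
func compressData(data []byte) (io.ReadSeeker, error) {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	if _, err := w.Write(data); err != nil {
		w.Close()
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return bytes.NewReader(b.Bytes()), nil
}

// decompressAndVerify mirrors DecompressAndVerify: inflate, then check
// the block against the expected checksum before trusting it.
func decompressAndVerify(src io.Reader, checksum string) (io.Reader, error) {
	r, err := gzip.NewReader(src)
	if err != nil {
		return nil, err
	}
	block, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	if c := getChecksum(block); c != checksum {
		return nil, fmt.Errorf("checksum %v doesn't match expected %v", c, checksum)
	}
	return bytes.NewReader(block), nil
}

func main() {
	data := []byte("backup block payload")
	rs, err := compressData(data)
	if err != nil {
		panic(err)
	}
	_, err = decompressAndVerify(rs, getChecksum(data))
	fmt.Println(err) // <nil>
}

Returning an io.ReadSeeker from the compressor lets an uploader rewind and retry the same compressed block without recompressing it.
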
--- a/vendor/github.com/longhorn/longhorn-engine/pkg/backingfile/backingfile.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/backingfile/backingfile.go @@ -16,8 +16,6 @@ import ( "github.com/longhorn/longhorn-engine/pkg/util" ) -const defaultSectorSize = 512 - type BackingFile struct { Size int64 SectorSize int64 @@ -102,14 +100,14 @@ func OpenBackingFile(file string) (*BackingFile, error) { if err != nil { return nil, err } - if size%defaultSectorSize != 0 { - return nil, fmt.Errorf("the backing file size %v should be a multiple of 512 bytes since Longhorn uses directIO by default", size) + if size%util.BackingImageSectorSize != 0 { + return nil, fmt.Errorf("the backing file size %v should be a multiple of %v bytes since Longhorn uses directIO by default", size, util.BackingImageSectorSize) } return &BackingFile{ Path: file, Disk: f, Size: size, - SectorSize: defaultSectorSize, + SectorSize: util.BackingImageSectorSize, }, nil } diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/controller/client/controller_client.go b/vendor/github.com/longhorn/longhorn-engine/pkg/controller/client/controller_client.go index 0a7252fc3..00f0a031b 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/controller/client/controller_client.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/controller/client/controller_client.go @@ -5,14 +5,15 @@ import ( "time" "github.com/golang/protobuf/ptypes/empty" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" + healthpb "google.golang.org/grpc/health/grpc_health_v1" "github.com/longhorn/longhorn-engine/pkg/meta" "github.com/longhorn/longhorn-engine/pkg/types" "github.com/longhorn/longhorn-engine/pkg/util" "github.com/longhorn/longhorn-engine/proto/ptypes" - healthpb "google.golang.org/grpc/health/grpc_health_v1" ) type ControllerServiceContext struct { @@ -44,7 +45,7 @@ func NewControllerClient(address string) (*ControllerClient, error) { getControllerServiceContext := func(serviceUrl string) (ControllerServiceContext, error) { connection, err := grpc.Dial(serviceUrl, grpc.WithInsecure()) if err != nil { - return ControllerServiceContext{}, fmt.Errorf("cannot connect to ControllerService %v: %v", serviceUrl, err) + return ControllerServiceContext{}, errors.Wrapf(err, "cannot connect to ControllerService %v", serviceUrl) } return ControllerServiceContext{ @@ -118,7 +119,7 @@ func (c *ControllerClient) VolumeGet() (*types.VolumeInfo, error) { volume, err := controllerServiceClient.VolumeGet(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get volume %v: %v", c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to get volume %v", c.serviceURL) } return GetVolumeInfo(volume), nil @@ -134,7 +135,7 @@ func (c *ControllerClient) VolumeStart(size, currentSize int64, replicas ...stri Size: size, CurrentSize: currentSize, }); err != nil { - return fmt.Errorf("failed to start volume %v: %v", c.serviceURL, err) + return errors.Wrapf(err, "failed to start volume %v", c.serviceURL) } return nil @@ -150,7 +151,7 @@ func (c *ControllerClient) VolumeSnapshot(name string, labels map[string]string) Labels: labels, }) if err != nil { - return "", fmt.Errorf("failed to create snapshot %v for volume %v: %v", name, c.serviceURL, err) + return "", errors.Wrapf(err, "failed to create snapshot %v for volume %v", name, c.serviceURL) } return reply.Name, nil @@ -164,7 +165,7 @@ func (c *ControllerClient) VolumeRevert(snapshot string) error { if _, err := controllerServiceClient.VolumeRevert(ctx, 
&ptypes.VolumeRevertRequest{ Name: snapshot, }); err != nil { - return fmt.Errorf("failed to revert to snapshot %v for volume %v: %v", snapshot, c.serviceURL, err) + return errors.Wrapf(err, "failed to revert to snapshot %v for volume %v", snapshot, c.serviceURL) } return nil @@ -178,7 +179,7 @@ func (c *ControllerClient) VolumeExpand(size int64) error { if _, err := controllerServiceClient.VolumeExpand(ctx, &ptypes.VolumeExpandRequest{ Size: size, }); err != nil { - return fmt.Errorf("failed to expand to size %v for volume %v: %v", size, c.serviceURL, err) + return errors.Wrapf(err, "failed to expand to size %v for volume %v", size, c.serviceURL) } return nil @@ -192,7 +193,7 @@ func (c *ControllerClient) VolumeFrontendStart(frontend string) error { if _, err := controllerServiceClient.VolumeFrontendStart(ctx, &ptypes.VolumeFrontendStartRequest{ Frontend: frontend, }); err != nil { - return fmt.Errorf("failed to start frontend %v for volume %v: %v", frontend, c.serviceURL, err) + return errors.Wrapf(err, "failed to start frontend %v for volume %v", frontend, c.serviceURL) } return nil @@ -204,7 +205,7 @@ func (c *ControllerClient) VolumeFrontendShutdown() error { defer cancel() if _, err := controllerServiceClient.VolumeFrontendShutdown(ctx, &empty.Empty{}); err != nil { - return fmt.Errorf("failed to shutdown frontend for volume %v: %v", c.serviceURL, err) + return errors.Wrapf(err, "failed to shutdown frontend for volume %v", c.serviceURL) } return nil @@ -217,7 +218,7 @@ func (c *ControllerClient) ReplicaList() ([]*types.ControllerReplicaInfo, error) reply, err := controllerServiceClient.ReplicaList(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to list replicas for volume %v: %v", c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to list replicas for volume %v", c.serviceURL) } replicas := []*types.ControllerReplicaInfo{} @@ -237,7 +238,7 @@ func (c *ControllerClient) ReplicaGet(address string) (*types.ControllerReplicaI Address: address, }) if err != nil { - return nil, fmt.Errorf("failed to get replica %v for volume %v: %v", address, c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to get replica %v for volume %v", address, c.serviceURL) } return GetControllerReplicaInfo(cr), nil @@ -254,7 +255,7 @@ func (c *ControllerClient) ReplicaCreate(address string, snapshotRequired bool, Mode: ptypes.ReplicaModeToGRPCReplicaMode(mode), }) if err != nil { - return nil, fmt.Errorf("failed to create replica %v for volume %v: %v", address, c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to create replica %v for volume %v", address, c.serviceURL) } return GetControllerReplicaInfo(cr), nil @@ -268,7 +269,7 @@ func (c *ControllerClient) ReplicaDelete(address string) error { if _, err := controllerServiceClient.ReplicaDelete(ctx, &ptypes.ReplicaAddress{ Address: address, }); err != nil { - return fmt.Errorf("failed to delete replica %v for volume %v: %v", address, c.serviceURL, err) + return errors.Wrapf(err, "failed to delete replica %v for volume %v", address, c.serviceURL) } return nil @@ -281,7 +282,7 @@ func (c *ControllerClient) ReplicaUpdate(replica *types.ControllerReplicaInfo) ( cr, err := controllerServiceClient.ReplicaUpdate(ctx, GetControllerReplica(replica)) if err != nil { - return nil, fmt.Errorf("failed to update replica %v for volume %v: %v", replica.Address, c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to update replica %v for volume %v", replica.Address, c.serviceURL) } return GetControllerReplicaInfo(cr), nil @@ 
-296,7 +297,7 @@ func (c *ControllerClient) ReplicaPrepareRebuild(address string) ([]types.SyncFi Address: address, }) if err != nil { - return nil, fmt.Errorf("failed to prepare rebuilding replica %v for volume %v: %v", address, c.serviceURL, err) + return nil, errors.Wrapf(err, "failed to prepare rebuilding replica %v for volume %v", address, c.serviceURL) } return GetSyncFileInfoList(reply.SyncFileInfoList), nil @@ -310,7 +311,7 @@ func (c *ControllerClient) ReplicaVerifyRebuild(address string) error { if _, err := controllerServiceClient.ReplicaVerifyRebuild(ctx, &ptypes.ReplicaAddress{ Address: address, }); err != nil { - return fmt.Errorf("failed to verify rebuilt replica %v for volume %v: %v", address, c.serviceURL, err) + return errors.Wrapf(err, "failed to verify rebuilt replica %v for volume %v", address, c.serviceURL) } return nil @@ -324,7 +325,7 @@ func (c *ControllerClient) JournalList(limit int) error { if _, err := controllerServiceClient.JournalList(ctx, &ptypes.JournalListRequest{ Limit: int64(limit), }); err != nil { - return fmt.Errorf("failed to list journal for volume %v: %v", c.serviceURL, err) + return errors.Wrapf(err, "failed to list journal for volume %v", c.serviceURL) } return nil @@ -337,7 +338,7 @@ func (c *ControllerClient) VersionDetailGet() (*meta.VersionOutput, error) { reply, err := controllerServiceClient.VersionDetailGet(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get version detail: %v", err) + return nil, errors.Wrap(err, "failed to get version detail") } return &meta.VersionOutput{ @@ -357,7 +358,7 @@ func (c *ControllerClient) VersionDetailGet() (*meta.VersionOutput, error) { func (c *ControllerClient) Check() error { conn, err := grpc.Dial(c.serviceURL, grpc.WithInsecure()) if err != nil { - return fmt.Errorf("cannot connect to ControllerService %v: %v", c.serviceURL, err) + return errors.Wrapf(err, "cannot connect to ControllerService %v", c.serviceURL) } defer conn.Close() // TODO: JM we can reuse the controller service context connection for the health requests @@ -370,7 +371,7 @@ func (c *ControllerClient) Check() error { Service: "", }) if err != nil { - return fmt.Errorf("failed to check health for gRPC controller server %v: %v", c.serviceURL, err) + return errors.Wrapf(err, "failed to check health for gRPC controller server %v", c.serviceURL) } if reply.Status != healthpb.HealthCheckResponse_SERVING { diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/qcow/libqcow.go b/vendor/github.com/longhorn/longhorn-engine/pkg/qcow/libqcow.go index e85ee8e96..2d60db9da 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/qcow/libqcow.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/qcow/libqcow.go @@ -20,7 +20,7 @@ func toError(e *C.libqcow_error_t) error { buf := [1024]C.char{} defer C.libqcow_error_free(&e) if C.libqcow_error_sprint(e, &buf[0], 1023) < 0 { - return fmt.Errorf("Unknown error: %v", e) + return fmt.Errorf("unknown error: %v", e) } return errors.New(C.GoString(&buf[0])) } @@ -44,7 +44,7 @@ func Open(path string) (*Qcow, error) { } func (q *Qcow) WriteAt(buf []byte, off int64) (int, error) { - return 0, errors.New("Unsupported operation") + return 0, errors.New("unsupported operation") } func (q *Qcow) ReadAt(buf []byte, off int64) (int, error) { diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/backup.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/backup.go index 20a4ce306..fed662ac4 100644 --- 
a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/backup.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/backup.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/longhorn/longhorn-engine/pkg/backingfile" @@ -206,12 +207,12 @@ func (rb *BackupStatus) OpenSnapshot(snapID, volumeID string) error { } if rb.volumeID != "" { - return fmt.Errorf("Volume %s and snapshot %s are already open, close first", rb.volumeID, rb.SnapshotID) + return fmt.Errorf("volume %s and snapshot %s are already open, close first", rb.volumeID, rb.SnapshotID) } dir, err := os.Getwd() if err != nil { - return fmt.Errorf("Cannot get working directory: %v", err) + return errors.Wrap(err, "cannot get working directory") } r, err := NewReadOnly(dir, id, rb.backingFile) if err != nil { @@ -227,7 +228,7 @@ func (rb *BackupStatus) OpenSnapshot(snapID, volumeID string) error { func (rb *BackupStatus) assertOpen(id, volumeID string) error { if rb.volumeID != volumeID || rb.SnapshotID != id { - return fmt.Errorf("Invalid state volume [%s] and snapshot [%s] are open, not volume [%s], snapshot [%s]", rb.volumeID, rb.SnapshotID, volumeID, id) + return fmt.Errorf("invalid state volume [%s] and snapshot [%s] are open, not volume [%s], snapshot [%s]", rb.volumeID, rb.SnapshotID, volumeID, id) } return nil } @@ -288,12 +289,12 @@ func (rb *BackupStatus) CompareSnapshot(snapID, compareSnapID, volumeID string) from := rb.findIndex(id) if from < 0 { - return nil, fmt.Errorf("Failed to find snapshot %s in chain", id) + return nil, fmt.Errorf("failed to find snapshot %s in chain", id) } to := rb.findIndex(compareID) if to < 0 { - return nil, fmt.Errorf("Failed to find snapshot %s in chain", compareID) + return nil, fmt.Errorf("failed to find snapshot %s in chain", compareID) } mappings := &backupstore.Mappings{ diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/client/client.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/client/client.go index eb2790a0f..a788bacdf 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/client/client.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/client/client.go @@ -7,6 +7,7 @@ import ( "time" "github.com/golang/protobuf/ptypes/empty" + "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" @@ -192,7 +193,7 @@ func (c *ReplicaClient) GetReplica() (*types.ReplicaInfo, error) { resp, err := replicaServiceClient.ReplicaGet(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get replica %v: %v", c.replicaServiceURL, err) + return nil, errors.Wrapf(err, "failed to get replica %v", c.replicaServiceURL) } return GetReplicaInfo(resp.Replica), nil @@ -207,7 +208,7 @@ func (c *ReplicaClient) OpenReplica() error { defer cancel() if _, err := replicaServiceClient.ReplicaOpen(ctx, &empty.Empty{}); err != nil { - return fmt.Errorf("failed to open replica %v: %v", c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to open replica %v", c.replicaServiceURL) } return nil @@ -222,7 +223,7 @@ func (c *ReplicaClient) CloseReplica() error { defer cancel() if _, err := replicaServiceClient.ReplicaClose(ctx, &empty.Empty{}); err != nil { - return fmt.Errorf("failed to close replica %v: %v", c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to close replica %v", c.replicaServiceURL) } return nil @@ -238,7 +239,7 @@ func (c *ReplicaClient) ReloadReplica() (*types.ReplicaInfo, error) { resp, err := replicaServiceClient.ReplicaReload(ctx, 
&empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to reload replica %v: %v", c.replicaServiceURL, err) + return nil, errors.Wrapf(err, "failed to reload replica %v", c.replicaServiceURL) } return GetReplicaInfo(resp.Replica), nil @@ -274,7 +275,7 @@ func (c *ReplicaClient) Revert(name, created string) error { Name: name, Created: created, }); err != nil { - return fmt.Errorf("failed to revert replica %v: %v", c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to revert replica %v", c.replicaServiceURL) } return nil @@ -292,7 +293,7 @@ func (c *ReplicaClient) RemoveDisk(disk string, force bool) error { Name: disk, Force: force, }); err != nil { - return fmt.Errorf("failed to remove disk %v for replica %v: %v", disk, c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to remove disk %v for replica %v", disk, c.replicaServiceURL) } return nil @@ -310,7 +311,7 @@ func (c *ReplicaClient) ReplaceDisk(target, source string) error { Target: target, Source: source, }); err != nil { - return fmt.Errorf("failed to replace disk %v with %v for replica %v: %v", target, source, c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to replace disk %v with %v for replica %v", target, source, c.replicaServiceURL) } return nil @@ -329,7 +330,7 @@ func (c *ReplicaClient) PrepareRemoveDisk(disk string) ([]*types.PrepareRemoveAc }) if err != nil { - return nil, fmt.Errorf("failed to prepare removing disk %v for replica %v: %v", disk, c.replicaServiceURL, err) + return nil, errors.Wrapf(err, "failed to prepare removing disk %v for replica %v", disk, c.replicaServiceURL) } operations := []*types.PrepareRemoveAction{} @@ -355,7 +356,7 @@ func (c *ReplicaClient) MarkDiskAsRemoved(disk string) error { if _, err := replicaServiceClient.DiskMarkAsRemoved(ctx, &ptypes.DiskMarkAsRemovedRequest{ Name: disk, }); err != nil { - return fmt.Errorf("failed to mark disk %v as removed for replica %v: %v", disk, c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to mark disk %v as removed for replica %v", disk, c.replicaServiceURL) } return nil @@ -372,7 +373,7 @@ func (c *ReplicaClient) SetRebuilding(rebuilding bool) error { if _, err := replicaServiceClient.RebuildingSet(ctx, &ptypes.RebuildingSetRequest{ Rebuilding: rebuilding, }); err != nil { - return fmt.Errorf("failed to set rebuilding to %v for replica %v: %v", rebuilding, c.replicaServiceURL, err) + return errors.Wrapf(err, "failed to set rebuilding to %v for replica %v", rebuilding, c.replicaServiceURL) } return nil @@ -389,7 +390,7 @@ func (c *ReplicaClient) RemoveFile(file string) error { if _, err := syncAgentServiceClient.FileRemove(ctx, &ptypes.FileRemoveRequest{ FileName: file, }); err != nil { - return fmt.Errorf("failed to remove file %v: %v", file, err) + return errors.Wrapf(err, "failed to remove file %v", file) } return nil @@ -407,7 +408,7 @@ func (c *ReplicaClient) RenameFile(oldFileName, newFileName string) error { OldFileName: oldFileName, NewFileName: newFileName, }); err != nil { - return fmt.Errorf("failed to rename or replace old file %v with new file %v: %v", oldFileName, newFileName, err) + return errors.Wrapf(err, "failed to rename or replace old file %v with new file %v", oldFileName, newFileName) } return nil @@ -426,7 +427,7 @@ func (c *ReplicaClient) SendFile(from, host string, port int32) error { Host: host, Port: port, }); err != nil { - return fmt.Errorf("failed to send file %v to %v:%v: %v", from, host, port, err) + return errors.Wrapf(err, "failed to send file %v to %v:%v", from, host, 
port) } return nil @@ -447,7 +448,7 @@ func (c *ReplicaClient) ExportVolume(snapshotName, host string, port int32, expo Port: port, ExportBackingImageIfExist: exportBackingImageIfExist, }); err != nil { - return fmt.Errorf("failed to export snapshot %v to %v:%v: %v", snapshotName, host, port, err) + return errors.Wrapf(err, "failed to export snapshot %v to %v:%v", snapshotName, host, port) } return nil } @@ -464,7 +465,7 @@ func (c *ReplicaClient) LaunchReceiver(toFilePath string) (string, int32, error) ToFileName: toFilePath, }) if err != nil { - return "", 0, fmt.Errorf("failed to launch receiver for %v: %v", toFilePath, err) + return "", 0, errors.Wrapf(err, "failed to launch receiver for %v", toFilePath) } return c.host, reply.Port, nil @@ -483,7 +484,7 @@ func (c *ReplicaClient) SyncFiles(fromAddress string, list []types.SyncFileInfo) ToHost: c.host, SyncFileInfoList: syncFileInfoListToSyncAgentGRPCFormat(list), }); err != nil { - return fmt.Errorf("failed to sync files %+v from %v: %v", list, fromAddress, err) + return errors.Wrapf(err, "failed to sync files %+v from %v", list, fromAddress) } return nil @@ -508,7 +509,7 @@ func (c *ReplicaClient) CreateBackup(backupName, snapshot, dest, volume, backing BackupName: backupName, }) if err != nil { - return nil, fmt.Errorf("failed to create backup to %v for volume %v: %v", dest, volume, err) + return nil, errors.Wrapf(err, "failed to create backup to %v for volume %v", dest, volume) } return resp, nil @@ -544,7 +545,7 @@ func (c *ReplicaClient) RmBackup(backup string) error { if _, err := syncAgentServiceClient.BackupRemove(ctx, &ptypes.BackupRemoveRequest{ Backup: backup, }); err != nil { - return fmt.Errorf("failed to remove backup %v: %v", backup, err) + return errors.Wrapf(err, "failed to remove backup %v", backup) } return nil @@ -563,7 +564,7 @@ func (c *ReplicaClient) RestoreBackup(backup, snapshotDiskName string, credentia SnapshotDiskName: snapshotDiskName, Credential: credential, }); err != nil { - return fmt.Errorf("failed to restore backup data %v to snapshot file %v: %v", backup, snapshotDiskName, err) + return errors.Wrapf(err, "failed to restore backup data %v to snapshot file %v", backup, snapshotDiskName) } return nil @@ -578,7 +579,7 @@ func (c *ReplicaClient) Reset() error { defer cancel() if _, err := syncAgentServiceClient.Reset(ctx, &empty.Empty{}); err != nil { - return fmt.Errorf("failed to cleanup restore info in Sync Agent Server: %v", err) + return errors.Wrap(err, "failed to cleanup restore info in Sync Agent Server") } return nil @@ -594,7 +595,7 @@ func (c *ReplicaClient) RestoreStatus() (*ptypes.RestoreStatusResponse, error) { resp, err := syncAgentServiceClient.RestoreStatus(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get restore status: %v", err) + return nil, errors.Wrap(err, "failed to get restore status") } return resp, nil @@ -609,7 +610,7 @@ func (c *ReplicaClient) SnapshotPurge() error { defer cancel() if _, err := syncAgentServiceClient.SnapshotPurge(ctx, &empty.Empty{}); err != nil { - return fmt.Errorf("failed to start snapshot purge: %v", err) + return errors.Wrap(err, "failed to start snapshot purge") } return nil @@ -625,7 +626,7 @@ func (c *ReplicaClient) SnapshotPurgeStatus() (*ptypes.SnapshotPurgeStatusRespon status, err := syncAgentServiceClient.SnapshotPurgeStatus(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get snapshot purge status: %v", err) + return nil, errors.Wrap(err, "failed to get snapshot purge status") } return status, 
nil @@ -641,7 +642,7 @@ func (c *ReplicaClient) ReplicaRebuildStatus() (*ptypes.ReplicaRebuildStatusResp status, err := syncAgentServiceClient.ReplicaRebuildStatus(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get replica rebuild status: %v", err) + return nil, errors.Wrap(err, "failed to get replica rebuild status") } return status, nil @@ -661,7 +662,7 @@ func (c *ReplicaClient) CloneSnapshot(fromAddress, snapshotFileName string, expo SnapshotFileName: snapshotFileName, ExportBackingImageIfExist: exportBackingImageIfExist, }); err != nil { - return fmt.Errorf("failed to clone snapshot %v from replica %v to host %v: %v", snapshotFileName, fromAddress, c.host, err) + return errors.Wrapf(err, "failed to clone snapshot %v from replica %v to host %v", snapshotFileName, fromAddress, c.host) } return nil @@ -677,7 +678,7 @@ func (c *ReplicaClient) SnapshotCloneStatus() (*ptypes.SnapshotCloneStatusRespon status, err := syncAgentServiceClient.SnapshotCloneStatus(ctx, &empty.Empty{}) if err != nil { - return nil, fmt.Errorf("failed to get snapshot clone status: %v", err) + return nil, errors.Wrap(err, "failed to get snapshot clone status") } return status, nil } diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/diff_disk.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/diff_disk.go index 1168043cb..671d37bd0 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/diff_disk.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/diff_disk.go @@ -114,7 +114,7 @@ func (d *diffDisk) readModifyWrite(buf []byte, offset int64) (int, error) { func (d *diffDisk) fullWriteAt(buf []byte, offset int64) (int, error) { if int64(len(buf))%d.sectorSize != 0 || offset%d.sectorSize != 0 { - return 0, fmt.Errorf("Write len(%d), offset %d not a multiple of %d", len(buf), offset, d.sectorSize) + return 0, fmt.Errorf("write len(%d), offset %d not a multiple of %d", len(buf), offset, d.sectorSize) } target := byte(len(d.files) - 1) @@ -172,7 +172,7 @@ func (d *diffDisk) ReadAt(buf []byte, offset int64) (int, error) { func (d *diffDisk) fullReadAt(buf []byte, offset int64) (int, error) { if int64(len(buf))%d.sectorSize != 0 || offset%d.sectorSize != 0 { - return 0, fmt.Errorf("Read not a multiple of %d", d.sectorSize) + return 0, fmt.Errorf("read not a multiple of %d", d.sectorSize) } if len(buf) == 0 { diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/replica.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/replica.go index 74535d416..f16a3dc7b 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/replica.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/replica.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path" @@ -30,7 +31,6 @@ const ( metadataSuffix = ".meta" imgSuffix = ".img" volumeMetaData = "volume.meta" - defaultSectorSize = 4096 headPrefix = "volume-head-" headSuffix = ".img" headName = headPrefix + "%03d" + headSuffix @@ -159,12 +159,12 @@ func New(size, sectorSize int64, dir string, backingFile *backingfile.BackingFil func NewReadOnly(dir, head string, backingFile *backingfile.BackingFile) (*Replica, error) { // size and sectorSize don't matter because they will be read from metadata - return construct(true, 0, 512, dir, head, backingFile, false) + return construct(true, 0, util.ReplicaSectorSize, dir, head, backingFile, false) } func construct(readonly bool, size, sectorSize int64, dir, head string, backingFile *backingfile.BackingFile, 
disableRevCounter bool) (*Replica, error) { if size%sectorSize != 0 { - return nil, fmt.Errorf("Size %d not a multiple of sector size %d", size, sectorSize) + return nil, fmt.Errorf("size %d not a multiple of sector size %d", size, sectorSize) } if err := os.Mkdir(dir, 0700); err != nil && !os.IsExist(err) { @@ -181,7 +181,12 @@ func construct(readonly bool, size, sectorSize int64, dir, head string, backingF } r.info.Size = size r.info.SectorSize = sectorSize - r.volume.sectorSize = defaultSectorSize + r.volume.sectorSize = util.VolumeSectorSize + + // Try to recover volume metafile if deleted or empty. + if err := r.tryRecoverVolumeMetaFile(head); err != nil { + return nil, err + } // Scan all the disks to build the disk map exists, err := r.readMetadata() @@ -205,7 +210,7 @@ func construct(readonly bool, size, sectorSize int64, dir, head string, backingF // Reference r.info.Size because it may have changed from reading // metadata locationSize := r.info.Size / r.volume.sectorSize - if size%defaultSectorSize != 0 { + if size%util.VolumeSectorSize != 0 { locationSize++ } r.volume.location = make([]byte, locationSize) @@ -365,7 +370,7 @@ func (r *Replica) RemoveDiffDisk(name string, force bool) error { defer r.Unlock() if name == r.info.Head { - return fmt.Errorf("Can not delete the active differencing disk") + return fmt.Errorf("cannot delete the active differencing disk") } if err := r.removeDiskNode(name, force); err != nil { @@ -396,11 +401,11 @@ func (r *Replica) MarkDiskAsRemoved(name string) error { } if disk == r.info.Head { - return fmt.Errorf("Can not mark the active differencing disk as removed") + return fmt.Errorf("cannot mark the active differencing disk as removed") } if err := r.markDiskAsRemoved(disk); err != nil { - return fmt.Errorf("Failed to mark disk %v as removed: %v", disk, err) + return errors.Wrapf(err, "failed to mark disk %v as removed", disk) } return nil @@ -408,18 +413,18 @@ func (r *Replica) MarkDiskAsRemoved(name string) error { func (r *Replica) hardlinkDisk(target, source string) error { if _, err := os.Stat(r.diskPath(source)); err != nil { - return fmt.Errorf("Cannot find source of replacing: %v", source) + return fmt.Errorf("cannot find source of replacing: %v", source) } if _, err := os.Stat(r.diskPath(target)); err == nil { logrus.Infof("Old file %s exists, deleting", target) if err := os.Remove(r.diskPath(target)); err != nil { - return fmt.Errorf("Failed to remove %s: %v", target, err) + return errors.Wrapf(err, "failed to remove %s", target) } } if err := os.Link(r.diskPath(source), r.diskPath(target)); err != nil { - return fmt.Errorf("Failed to link %s to %s", source, target) + return fmt.Errorf("failed to link %s to %s", source, target) } return nil } @@ -429,7 +434,7 @@ func (r *Replica) ReplaceDisk(target, source string) error { defer r.Unlock() if target == r.info.Head { - return fmt.Errorf("Can not replace the active differencing disk") + return fmt.Errorf("cannot replace the active differencing disk") } if err := r.hardlinkDisk(target, source); err != nil { @@ -478,7 +483,7 @@ func (r *Replica) removeDiskNode(name string, force bool) error { // If snapshot has more than one child, we cannot really delete it if len(children) > 1 { if !force { - return fmt.Errorf("Cannot remove snapshot %v with %v children", + return fmt.Errorf("cannot remove snapshot %v with %v children", name, len(children)) } logrus.Warnf("force delete disk %v with multiple children. 
Randomly choose a child to inherit", name) @@ -530,11 +535,11 @@ func (r *Replica) PrepareRemoveDisk(name string) ([]PrepareRemoveAction, error) } if disk == r.info.Head { - return nil, fmt.Errorf("Can not delete the active differencing disk") + return nil, fmt.Errorf("cannot delete the active differencing disk") } if !data.Removed { - return nil, fmt.Errorf("Disk %v hasn't been marked as removed", disk) + return nil, fmt.Errorf("disk %v hasn't been marked as removed", disk) } actions, err := r.processPrepareRemoveDisks(disk) @@ -548,7 +553,7 @@ func (r *Replica) processPrepareRemoveDisks(disk string) ([]PrepareRemoveAction, actions := []PrepareRemoveAction{} if _, exists := r.diskData[disk]; !exists { - return nil, fmt.Errorf("Wrong disk %v doesn't exist", disk) + return nil, fmt.Errorf("wrong disk %v doesn't exist", disk) } children := r.diskChildrenMap[disk] @@ -610,7 +615,7 @@ func (r *Replica) DisplayChain() ([]string, error) { for cur != "" { disk, ok := r.diskData[cur] if !ok { - return nil, fmt.Errorf("Failed to find metadata for %s", cur) + return nil, fmt.Errorf("failed to find metadata for %s", cur) } if !disk.Removed { result = append(result, cur) @@ -631,7 +636,7 @@ func (r *Replica) Chain() ([]string, error) { for cur != "" { result = append(result, cur) if _, ok := r.diskData[cur]; !ok { - return nil, fmt.Errorf("Failed to find metadata for %s", cur) + return nil, fmt.Errorf("failed to find metadata for %s", cur) } cur = r.diskData[cur].Parent } @@ -709,7 +714,7 @@ func (r *Replica) nextFile(parsePattern *regexp.Regexp, pattern, parent string) matches := parsePattern.FindStringSubmatch(parent) if matches == nil { - return "", fmt.Errorf("Invalid name %s does not match pattern: %v", parent, parsePattern) + return "", fmt.Errorf("invalid name %s does not match pattern: %v", parent, parsePattern) } index, _ := strconv.Atoi(matches[1]) @@ -793,13 +798,13 @@ func (r *Replica) linkDisk(oldname, newname string) error { func (r *Replica) markDiskAsRemoved(name string) error { disk, ok := r.diskData[name] if !ok { - return fmt.Errorf("Cannot find disk %v", name) + return fmt.Errorf("cannot find disk %v", name) } if stat, err := os.Stat(r.diskPath(name)); err != nil || stat.IsDir() { - return fmt.Errorf("Cannot find disk file %v", name) + return fmt.Errorf("cannot find disk file %v", name) } if stat, err := os.Stat(r.diskPath(name + metadataSuffix)); err != nil || stat.IsDir() { - return fmt.Errorf("Cannot find disk metafile %v", name+metadataSuffix) + return fmt.Errorf("cannot find disk metafile %v", name+metadataSuffix) } disk.Removed = true r.diskData[name] = disk @@ -860,11 +865,11 @@ func (r *Replica) createDisk(name string, userCreated bool, created string, labe log := logrus.WithFields(logrus.Fields{"disk": name}) log.Info("Starting to create disk") if r.readOnly { - return fmt.Errorf("Can not create disk on read-only replica") + return fmt.Errorf("cannot create disk on read-only replica") } if len(r.activeDiskData)+1 > maximumChainLength { - return fmt.Errorf("Too many active disks: %v", len(r.activeDiskData)+1) + return fmt.Errorf("too many active disks: %v", len(r.activeDiskData)+1) } oldHead := r.info.Head @@ -994,7 +999,7 @@ func (r *Replica) openLiveChain() error { } if len(chain) > maximumChainLength { - return fmt.Errorf("Live chain is too long: %v", len(chain)) + return fmt.Errorf("live chain is too long: %v", len(chain)) } for i := len(chain) - 1; i >= 0; i-- { @@ -1027,7 +1032,7 @@ func (r *Replica) readMetadata() (bool, error) { if err := 
r.unmarshalFile(file.Name(), &r.info); err != nil { return false, err } - r.volume.sectorSize = defaultSectorSize + r.volume.sectorSize = util.VolumeSectorSize r.volume.size = r.info.Size } else if strings.HasSuffix(file.Name(), metadataSuffix) { if err := r.readDiskData(file.Name()); err != nil { @@ -1039,6 +1044,50 @@ func (r *Replica) readMetadata() (bool, error) { return len(r.diskData) > 0, nil } +func (r *Replica) tryRecoverVolumeMetaFile(head string) error { + valid, err := r.checkValidVolumeMetaData() + if err != nil { + return err + } + if valid { + return nil + } + + if head != "" { + r.info.Head = head + } + + if r.info.Head == "" { + files, err := ioutil.ReadDir(r.dir) + if err != nil { + return err + } + for _, file := range files { + if strings.Contains(file.Name(), types.VolumeHeadName) { + r.info.Head = file.Name() + break + } + } + } + + logrus.Warnf("Recovering volume metafile %v, and replica info: %+v", r.diskPath(volumeMetaData), r.info) + return r.writeVolumeMetaData(true, r.info.Rebuilding) +} + +func (r *Replica) checkValidVolumeMetaData() (bool, error) { + err := r.unmarshalFile(r.diskPath(volumeMetaData), &r.info) + if err == nil { + return true, nil + } + + // recover metadata file that does not exist or is empty + if os.IsNotExist(err) || errors.Is(err, io.EOF) { + return false, nil + } + + return false, err +} + func (r *Replica) readDiskData(file string) error { var data disk if err := r.unmarshalFile(file, &data); err != nil { @@ -1114,7 +1163,7 @@ func (r *Replica) Expand(size int64) (err error) { defer r.Unlock() if r.info.Size > size { - return fmt.Errorf("Cannot expand replica to a smaller size %v", size) + return fmt.Errorf("cannot expand replica to a smaller size %v", size) } else if r.info.Size == size { logrus.Infof("Replica had been expanded to size %v", size) return nil @@ -1133,7 +1182,7 @@ func (r *Replica) Expand(size int64) (err error) { func (r *Replica) WriteAt(buf []byte, offset int64) (int, error) { if r.readOnly { - return 0, fmt.Errorf("Can not write on read-only replica") + return 0, fmt.Errorf("cannot write on read-only replica") } r.RLock() diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/revision_counter.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/revision_counter.go index f540088ea..d93ed74c3 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/revision_counter.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/revision_counter.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/longhorn/sparse-tools/sparse" @@ -26,11 +27,11 @@ func (r *Replica) readRevisionCounter() (int64, error) { buf := make([]byte, revisionBlockSize) _, err := r.revisionFile.ReadAt(buf, 0) if err != nil && err != io.EOF { - return 0, fmt.Errorf("failed to read from revision counter file: %v", err) + return 0, errors.Wrap(err, "failed to read from revision counter file") } counter, err := strconv.ParseInt(strings.Trim(string(buf), "\x00"), 10, 64) if err != nil { - return 0, fmt.Errorf("failed to parse revision counter file: %v", err) + return 0, errors.Wrap(err, "failed to parse revision counter file") } return counter, nil } @@ -44,7 +45,7 @@ func (r *Replica) writeRevisionCounter(counter int64) error { copy(buf, []byte(strconv.FormatInt(counter, 10))) _, err := r.revisionFile.WriteAt(buf, 0) if err != nil { - return fmt.Errorf("failed to write to revision counter file: %v", err) + return errors.Wrap(err, "failed to write to revision 
counter file") } return nil } diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/server.go b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/server.go index fef0039ec..113c59077 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/replica/server.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/replica/server.go @@ -25,7 +25,7 @@ type Server struct { sync.RWMutex r *Replica dir string - defaultSectorSize int64 + sectorSize int64 backing *backingfile.BackingFile revisionCounterDisabled bool } @@ -34,7 +34,7 @@ func NewServer(dir string, backing *backingfile.BackingFile, sectorSize int64, d return &Server{ dir: dir, backing: backing, - defaultSectorSize: sectorSize, + sectorSize: sectorSize, revisionCounterDisabled: disableRevCounter, } } @@ -43,7 +43,7 @@ func (s *Server) getSectorSize() int64 { if s.backing != nil && s.backing.SectorSize > 0 { return s.backing.SectorSize } - return s.defaultSectorSize + return s.sectorSize } func (s *Server) Create(size int64) error { @@ -71,7 +71,7 @@ func (s *Server) Open() error { defer s.Unlock() if s.r != nil { - return fmt.Errorf("Replica is already open") + return fmt.Errorf("replica is already open") } _, info := s.Status() @@ -111,7 +111,19 @@ func (s *Server) Status() (State, Info) { info, err := ReadInfo(s.dir) if os.IsNotExist(err) { return Initial, Info{} - } else if err != nil { + } + + replica := Replica{dir: s.dir} + volumeMetaFileValid, vaildErr := replica.checkValidVolumeMetaData() + if vaildErr != nil { + logrus.Errorf("Failed to check if volume metedata is valid in replica directory %s: %v", s.dir, err) + return Error, Info{} + } + if !volumeMetaFileValid { + return Initial, Info{} + } + + if err != nil { logrus.Errorf("Failed to read info in replica directory %s: %v", s.dir, err) return Error, Info{} } @@ -138,7 +150,7 @@ func (s *Server) SetRebuilding(rebuilding bool) error { // Must be Open/Dirty to set true or must be Rebuilding to set false if (rebuilding && state != Open && state != Dirty) || (!rebuilding && state != Rebuilding) { - return fmt.Errorf("Can not set rebuilding=%v from state %s", rebuilding, state) + return fmt.Errorf("cannot set rebuilding=%v from state %s", rebuilding, state) } return s.r.SetRebuilding(rebuilding) @@ -280,7 +292,7 @@ func (s *Server) WriteAt(buf []byte, offset int64) (int, error) { defer s.RUnlock() if s.r == nil { - return 0, fmt.Errorf("Volume no longer exist") + return 0, fmt.Errorf("volume no longer exist") } i, err := s.r.WriteAt(buf, offset) return i, err @@ -291,7 +303,7 @@ func (s *Server) ReadAt(buf []byte, offset int64) (int, error) { defer s.RUnlock() if s.r == nil { - return 0, fmt.Errorf("Volume no longer exist") + return 0, fmt.Errorf("volume no longer exist") } i, err := s.r.ReadAt(buf, offset) return i, err diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/sync/backup.go b/vendor/github.com/longhorn/longhorn-engine/pkg/sync/backup.go index 0cbc12ec7..81ce6bbf5 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/sync/backup.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/sync/backup.go @@ -44,7 +44,7 @@ func (t *Task) CreateBackup(backupName, snapshot, dest, backingImageName, backin var replica *types.ControllerReplicaInfo if snapshot == VolumeHeadName { - return nil, fmt.Errorf("can not backup the head disk in the chain") + return nil, fmt.Errorf("cannot backup the head disk in the chain") } volume, err := t.client.VolumeGet() @@ -175,7 +175,7 @@ func (t *Task) RestoreBackup(backup string, credential map[string]string) 
error if isRebuilding, err := t.isRebuilding(r); err != nil { taskErr.Append(NewReplicaError(r.Address, err)) } else if isRebuilding { - taskErr.Append(NewReplicaError(r.Address, fmt.Errorf("can not do restore for normal rebuilding replica"))) + taskErr.Append(NewReplicaError(r.Address, fmt.Errorf("cannot do restore for normal rebuilding replica"))) } } if taskErr.HasError() { @@ -284,7 +284,7 @@ func (t *Task) RestoreBackup(backup string, credential map[string]string) error func (t *Task) restoreBackup(replicaInController *types.ControllerReplicaInfo, backup string, snapshotFile string, credential map[string]string) error { if replicaInController.Mode == types.ERR { - return fmt.Errorf("can not restore backup from replica in mode ERR") + return fmt.Errorf("cannot restore backup from replica in mode ERR") } repClient, err := replicaClient.NewReplicaClient(replicaInController.Address) @@ -313,7 +313,7 @@ func (t *Task) Reset() error { return err } else if ok { logrus.Errorf("Replicas are rebuilding. Can't reset: %v", err) - return fmt.Errorf("can not reset Restore info as replica(%s) is rebuilding", r.Address) + return fmt.Errorf("cannot reset Restore info as replica(%s) is rebuilding", r.Address) } } diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/sync/sync.go b/vendor/github.com/longhorn/longhorn-engine/pkg/sync/sync.go index c38ad219d..e053af99b 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/sync/sync.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/sync/sync.go @@ -798,7 +798,7 @@ func CloneSnapshot(engineControllerClient, fromControllerClient *client.Controll } } if sourceReplica == nil { - return fmt.Errorf("cannot find a RW replica in the source volume for clonning") + return fmt.Errorf("cannot find a RW replica in the source volume for cloning") } replicas, err = engineControllerClient.ReplicaList() diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/types/types.go b/vendor/github.com/longhorn/longhorn-engine/pkg/types/types.go index 6209fbf52..83be0a9a5 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/types/types.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/types/types.go @@ -34,6 +34,8 @@ const ( EngineFrontendBlockDev = "tgt-blockdev" EngineFrontendISCSI = "tgt-iscsi" + + VolumeHeadName = "volume-head" ) type ReaderWriterAt interface { diff --git a/vendor/github.com/longhorn/longhorn-engine/pkg/util/util.go b/vendor/github.com/longhorn/longhorn-engine/pkg/util/util.go index ae14350eb..63820678b 100644 --- a/vendor/github.com/longhorn/longhorn-engine/pkg/util/util.go +++ b/vendor/github.com/longhorn/longhorn-engine/pkg/util/util.go @@ -30,12 +30,14 @@ var ( MaximumVolumeNameSize = 64 validVolumeName = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`) - cmdTimeout = time.Minute // one minute by default - HostProc = "/host/proc" ) const ( + VolumeSectorSize = 4096 + ReplicaSectorSize = 512 + BackingImageSectorSize = 512 + BlockSizeLinux = 512 ) diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore deleted file mode 100644 index 5d7e88de0..000000000 --- a/vendor/github.com/pierrec/lz4/v4/.gitignore +++ /dev/null @@ -1,36 +0,0 @@ -# Created by https://www.gitignore.io/api/macos - -### macOS ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns 
-.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -# End of https://www.gitignore.io/api/macos - -cmd/*/*exe -.idea - -fuzz/*.zip diff --git a/vendor/github.com/pierrec/lz4/v4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE deleted file mode 100644 index bd899d835..000000000 --- a/vendor/github.com/pierrec/lz4/v4/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md deleted file mode 100644 index 4629c9d0e..000000000 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# lz4 : LZ4 compression in pure Go - -[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4) -[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions) -[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4) -[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags) - -## Overview - -This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks. -The implementation is based on the reference C [one](https://github.com/lz4/lz4). - -## Install - -Assuming you have the go toolchain installed: - -``` -go get github.com/pierrec/lz4/v4 -``` - -There is a command line interface tool to compress and decompress LZ4 files. - -``` -go install github.com/pierrec/lz4/v4/cmd/lz4c -``` - -Usage - -``` -Usage of lz4c: - -version - print the program version - -Subcommands: -Compress the given files or from stdin to stdout. -compress [arguments] [ ...] 
- -bc - enable block checksum - -l int - compression level (0=fastest) - -sc - disable stream checksum - -size string - block max size [64K,256K,1M,4M] (default "4M") - -Uncompress the given files or from stdin to stdout. -uncompress [arguments] [ ...] - -``` - - -## Example - -``` -// Compress and uncompress an input string. -s := "hello world" -r := strings.NewReader(s) - -// The pipe will uncompress the data from the writer. -pr, pw := io.Pipe() -zw := lz4.NewWriter(pw) -zr := lz4.NewReader(pr) - -go func() { - // Compress the input string. - _, _ = io.Copy(zw, r) - _ = zw.Close() // Make sure the writer is closed - _ = pw.Close() // Terminate the pipe -}() - -_, _ = io.Copy(os.Stdout, zr) - -// Output: -// hello world -``` - -## Contributing - -Contributions are very welcome for bug fixing, performance improvements...! - -- Open an issue with a proper description -- Send a pull request with appropriate test case(s) - -## Contributors - -Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far! - -Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder. - -Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64. - -Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code. diff --git a/vendor/github.com/pierrec/lz4/v4/go.mod b/vendor/github.com/pierrec/lz4/v4/go.mod deleted file mode 100644 index 42229b296..000000000 --- a/vendor/github.com/pierrec/lz4/v4/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pierrec/lz4/v4 - -go 1.14 diff --git a/vendor/github.com/pierrec/lz4/v4/go.sum b/vendor/github.com/pierrec/lz4/v4/go.sum deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go deleted file mode 100644 index fec8adb03..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go +++ /dev/null @@ -1,481 +0,0 @@ -package lz4block - -import ( - "encoding/binary" - "math/bits" - "sync" - - "github.com/pierrec/lz4/v4/internal/lz4errors" -) - -const ( - // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise for fast compression. - hashLog = 16 - htSize = 1 << hashLog - - mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes. -) - -func recoverBlock(e *error) { - if r := recover(); r != nil && *e == nil { - *e = lz4errors.ErrInvalidSourceShortBuffer - } -} - -// blockHash hashes the lower 6 bytes into a value < htSize. 
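// The expression below first shifts x left by 16 so only the low 48 bits (6 bytes)
// contribute, multiplies by a 48-bit prime, and keeps the top hashLog bits of the
// 64-bit product, so the result is always < htSize (65536 for hashLog = 16).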
-func blockHash(x uint64) uint32 { - const prime6bytes = 227718039650203 - return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog)) -} - -func CompressBlockBound(n int) int { - return n + n/255 + 16 -} - -func UncompressBlock(src, dst, dict []byte) (int, error) { - if len(src) == 0 { - return 0, nil - } - if di := decodeBlock(dst, src, dict); di >= 0 { - return di, nil - } - return 0, lz4errors.ErrInvalidSourceShortBuffer -} - -type Compressor struct { - // Offsets are at most 64kiB, so we can store only the lower 16 bits of - // match positions: effectively, an offset from some 64kiB block boundary. - // - // When we retrieve such an offset, we interpret it as relative to the last - // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000, - // depending on which of these is inside the current window. If a table - // entry was generated more than 64kiB back in the input, we find out by - // inspecting the input stream. - table [htSize]uint16 - - // Bitmap indicating which positions in the table are in use. - // This allows us to quickly reset the table for reuse, - // without having to zero everything. - inUse [htSize / 32]uint32 -} - -// Get returns the position of a presumptive match for the hash h. -// The match may be a false positive due to a hash collision or an old entry. -// If si < winSize, the return value may be negative. -func (c *Compressor) get(h uint32, si int) int { - h &= htSize - 1 - i := 0 - if c.inUse[h/32]&(1<<(h%32)) != 0 { - i = int(c.table[h]) - } - i += si &^ winMask - if i >= si { - // Try previous 64kiB block (negative when in first block). - i -= winSize - } - return i -} - -func (c *Compressor) put(h uint32, si int) { - h &= htSize - 1 - c.table[h] = uint16(si) - c.inUse[h/32] |= 1 << (h % 32) -} - -func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} } - -var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }} - -func CompressBlock(src, dst []byte) (int, error) { - c := compressorPool.Get().(*Compressor) - n, err := c.CompressBlock(src, dst) - compressorPool.Put(c) - return n, err -} - -func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { - // Zero out reused table to avoid non-deterministic output (issue #65). - c.reset() - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - - // si: Current position of the search. - // anchor: Position of the current literals. - var si, di, anchor int - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - // Fast scan strategy: the hash table only stores the last 4 bytes sequences. - for si < sn { - // Hash the next 6 bytes (sequence)... - match := binary.LittleEndian.Uint64(src[si:]) - h := blockHash(match) - h2 := blockHash(match >> 8) - - // We check a match at s, s+1 and s+2 and pick the first one we get. - // Checking 3 only requires us to load the source one. - ref := c.get(h, si) - ref2 := c.get(h2, si+1) - c.put(h, si) - c.put(h2, si+1) - - offset := si - ref - - if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { - // No match. Start calculating another hash. 
- // The processor can usually do this out-of-order. - h = blockHash(match >> 16) - ref3 := c.get(h, si+2) - - // Check the second match at si+1 - si += 1 - offset = si - ref2 - - if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) { - // No match. Check the third match at si+2 - si += 1 - offset = si - ref3 - c.put(h, si) - - if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) { - // Skip one extra byte (at si+3) before we check 3 matches again. - si += 2 + (si-anchor)>>adaptSkipLog - continue - } - } - } - - // Match found. - lLen := si - anchor // Literal length. - // We already matched 4 bytes. - mLen := 4 - - // Extend backwards if we can, reducing literals. - tOff := si - offset - 1 - for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] { - si-- - tOff-- - lLen-- - mLen++ - } - - // Add the match length, so we continue search at the end. - // Use mLen to store the offset base. - si, mLen = si+mLen, si+minMatch - - // Find the longest match by looking by batches of 8 bytes. - for si+8 <= sn { - x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:]) - if x == 0 { - si += 8 - } else { - // Stop is first non-zero byte. - si += bits.TrailingZeros64(x) >> 3 - break - } - } - - mLen = si - mLen - if di >= len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF && di < len(dst); l -= 0xFF { - dst[di] = 0xFF - di++ - } - if di >= len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - dst[di] = byte(l) - } - di++ - - // Literals. - if di+lLen > len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen + 2 - anchor = si - - // Encode offset. - if di > len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - if di >= len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - dst[di] = byte(mLen) - di++ - } - // Check if we can load next values. - if si >= sn { - break - } - // Hash match end-2 - h = blockHash(binary.LittleEndian.Uint64(src[si-2:])) - c.put(h, si-2) - } - -lastLiterals: - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. - if di >= len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - if di >= len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. - return 0, nil - } - if di+len(src)-anchor > len(dst) { - return 0, lz4errors.ErrInvalidSourceShortBuffer - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} - -// blockHash hashes 4 bytes into a value < winSize. -func blockHashHC(x uint32) uint32 { - const hasher uint32 = 2654435761 // Knuth multiplicative hash. 
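// 2654435761 is a prime close to 2^32 divided by the golden ratio (the same
// constant xxHash uses as prime1); multiplying by it and keeping the top
// winSizeLog bits of the 32-bit product spreads consecutive 4-byte keys
// evenly across the table.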
- return x * hasher >> (32 - winSizeLog) -} - -type CompressorHC struct { - // hashTable: stores the last position found for a given hash - // chainTable: stores previous positions for a given hash - hashTable, chainTable [htSize]int - needsReset bool -} - -var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }} - -func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) { - c := compressorHCPool.Get().(*CompressorHC) - n, err := c.CompressBlock(src, dst, depth) - compressorHCPool.Put(c) - return n, err -} - -func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) { - if c.needsReset { - // Zero out reused table to avoid non-deterministic output (issue #65). - c.hashTable = [htSize]int{} - c.chainTable = [htSize]int{} - } - c.needsReset = true // Only false on first call. - - defer recoverBlock(&err) - - // Return 0, nil only if the destination buffer size is < CompressBlockBound. - isNotCompressible := len(dst) < CompressBlockBound(len(src)) - - // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible. - // This significantly speeds up incompressible data and usually has very small impact on compression. - // bytes to skip = 1 + (bytes since last match >> adaptSkipLog) - const adaptSkipLog = 7 - - var si, di, anchor int - sn := len(src) - mfLimit - if sn <= 0 { - goto lastLiterals - } - - if depth == 0 { - depth = winSize - } - - for si < sn { - // Hash the next 4 bytes (sequence). - match := binary.LittleEndian.Uint32(src[si:]) - h := blockHashHC(match) - - // Follow the chain until out of window and give the longest match. - mLen := 0 - offset := 0 - for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 { - // The first (mLen==0) or next byte (mLen>=minMatch) at current match length - // must match to improve on the match length. - if src[next+mLen] != src[si+mLen] { - continue - } - ml := 0 - // Compare the current position with a previous with the same hash. - for ml < sn-si { - x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:]) - if x == 0 { - ml += 8 - } else { - // Stop is first non-zero byte. - ml += bits.TrailingZeros64(x) >> 3 - break - } - } - if ml < minMatch || ml <= mLen { - // Match too small (<minMatch) or smaller than the current match. - continue - } - // Found a longer match, keep its position and length. - mLen = ml - offset = si - next - // Try another previous position with the same hash. - } - - // No match found. - if mLen == 0 { - si += 1 + (si-anchor)>>adaptSkipLog - continue - } - - // Match found. - // Update hash/chain tables with overlapping bytes: - // si already hashed, add everything from si+1 up to the match length. - winStart := si + 1 - if ws := si + mLen - winSize; ws > winStart { - winStart = ws - } - for si, ml := winStart, si+mLen; si < ml; { - match >>= 8 - match |= uint32(src[si+3]) << 24 - h := blockHashHC(match) - c.chainTable[si&winMask] = c.hashTable[h] - c.hashTable[h] = si - si++ - } - - lLen := si - anchor - si += mLen - mLen -= minMatch // Match length does not include minMatch. - - if mLen < 0xF { - dst[di] = byte(mLen) - } else { - dst[di] = 0xF - } - - // Encode literals length. - if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:di+lLen], src[anchor:anchor+lLen]) - di += lLen - anchor = si - - // Encode offset. - di += 2 - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2.
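// LZ4 encodes lengths of 15 or more as the 0xF nibble in the token plus a run
// of 0xFF continuation bytes and one remainder byte. For example, a raw match
// length of 19 (mLen = 15 once minMatch is subtracted) is written as the
// nibble 0xF followed by a single 0x00 byte.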
- if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - } - - if isNotCompressible && anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. -lastLiterals: - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if isNotCompressible && di >= anchor { - // Incompressible. - return 0, nil - } - di += copy(dst[di:di+len(src)-anchor], src[anchor:]) - return di, nil -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go deleted file mode 100644 index a1bfa99e4..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package lz4block provides LZ4 BlockSize types and pools of buffers. -package lz4block - -import "sync" - -const ( - Block64Kb uint32 = 1 << (16 + iota*2) - Block256Kb - Block1Mb - Block4Mb -) - -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. -var Block8Mb = uint32(CompressBlockBound(8 << 20)) - -var ( - BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} - BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} - BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }} - BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }} - BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }} -) - -func Index(b uint32) BlockSizeIndex { - switch b { - case Block64Kb: - return 4 - case Block256Kb: - return 5 - case Block1Mb: - return 6 - case Block4Mb: - return 7 - case Block8Mb: // only valid in legacy mode - return 3 - } - return 0 -} - -func IsValid(b uint32) bool { - return Index(b) > 0 -} - -type BlockSizeIndex uint8 - -func (b BlockSizeIndex) IsValid() bool { - switch b { - case 4, 5, 6, 7: - return true - } - return false -} - -func (b BlockSizeIndex) Get() []byte { - var buf interface{} - switch b { - case 4: - buf = BlockPool64K.Get() - case 5: - buf = BlockPool256K.Get() - case 6: - buf = BlockPool1M.Get() - case 7: - buf = BlockPool4M.Get() - case 3: - buf = BlockPool8M.Get() - } - return buf.([]byte) -} - -func Put(buf []byte) { - // Safeguard: do not allow invalid buffers. 
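// Only buffers whose capacity matches one of the pooled block sizes are
// returned to a pool; any other buffer is dropped and left to the garbage
// collector.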
- switch c := cap(buf); uint32(c) { - case Block64Kb: - BlockPool64K.Put(buf[:c]) - case Block256Kb: - BlockPool256K.Put(buf[:c]) - case Block1Mb: - BlockPool1M.Put(buf[:c]) - case Block4Mb: - BlockPool4M.Put(buf[:c]) - case Block8Mb: - BlockPool8M.Put(buf[:c]) - } -} - -type CompressionLevel uint32 - -const Fast CompressionLevel = 0 diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s deleted file mode 100644 index 1d00133fa..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s +++ /dev/null @@ -1,448 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "go_asm.h" -#include "textflag.h" - -// AX scratch -// BX scratch -// CX literal and match lengths -// DX token, match offset -// -// DI &dst -// SI &src -// R8 &dst + len(dst) -// R9 &src + len(src) -// R11 &dst -// R12 short output end -// R13 short input end -// R14 &dict -// R15 len(dict) - -// func decodeBlock(dst, src, dict []byte) int -TEXT ·decodeBlock(SB), NOSPLIT, $48-80 - MOVQ dst_base+0(FP), DI - MOVQ DI, R11 - MOVQ dst_len+8(FP), R8 - ADDQ DI, R8 - - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R9 - CMPQ R9, $0 - JE err_corrupt - ADDQ SI, R9 - - MOVQ dict_base+48(FP), R14 - MOVQ dict_len+56(FP), R15 - - // shortcut ends - // short output end - MOVQ R8, R12 - SUBQ $32, R12 - // short input end - MOVQ R9, R13 - SUBQ $16, R13 - - XORL CX, CX - -loop: - // token := uint32(src[si]) - MOVBLZX (SI), DX - INCQ SI - - // lit_len = token >> 4 - // if lit_len > 0 - // CX = lit_len - MOVL DX, CX - SHRL $4, CX - - // if lit_len != 0xF - CMPL CX, $0xF - JEQ lit_len_loop - CMPQ DI, R12 - JAE copy_literal - CMPQ SI, R13 - JAE copy_literal - - // copy shortcut - - // A two-stage shortcut for the most common case: - // 1) If the literal length is 0..14, and there is enough space, - // enter the shortcut and copy 16 bytes on behalf of the literals - // (in the fast mode, only 8 bytes can be safely copied this way). - // 2) Further if the match length is 4..18, copy 18 bytes in a similar - // manner; but we ensure that there's enough space in the output for - // those 18 bytes earlier, upon entering the shortcut (in other words, - // there is a combined check for both stages). - - // copy literal - MOVOU (SI), X0 - MOVOU X0, (DI) - ADDQ CX, DI - ADDQ CX, SI - - MOVL DX, CX - ANDL $0xF, CX - - // The second stage: prepare for match copying, decode full info. - // If it doesn't work out, the info won't be wasted. - // offset := uint16(data[:2]) - MOVWLZX (SI), DX - TESTL DX, DX - JE err_corrupt - ADDQ $2, SI - JC err_short_buf - - MOVQ DI, AX - SUBQ DX, AX - JC err_corrupt - CMPQ AX, DI - JA err_short_buf - - // if we can't do the second stage then jump straight to read the - // match length, we already have the offset. - CMPL CX, $0xF - JEQ match_len_loop_pre - CMPL DX, $8 - JLT match_len_loop_pre - CMPQ AX, R11 - JB match_len_loop_pre - - // memcpy(op + 0, match + 0, 8); - MOVQ (AX), BX - MOVQ BX, (DI) - // memcpy(op + 8, match + 8, 8); - MOVQ 8(AX), BX - MOVQ BX, 8(DI) - // memcpy(op +16, match +16, 2); - MOVW 16(AX), BX - MOVW BX, 16(DI) - - LEAQ const_minMatch(DI)(CX*1), DI - - // shortcut complete, load next token - JMP loopcheck - - // Read the rest of the literal length: - // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF). 
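// Each iteration below re-checks SI against the end of src (R9) before
// loading, so a corrupt run of 0xFF continuation bytes cannot read past the
// input buffer.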
-lit_len_loop: - CMPQ SI, R9 - JAE err_short_buf - - MOVBLZX (SI), BX - INCQ SI - ADDQ BX, CX - - CMPB BX, $0xFF - JE lit_len_loop - -copy_literal: - // bounds check src and dst - MOVQ SI, AX - ADDQ CX, AX - JC err_short_buf - CMPQ AX, R9 - JA err_short_buf - - MOVQ DI, BX - ADDQ CX, BX - JC err_short_buf - CMPQ BX, R8 - JA err_short_buf - - // Copy literals of <=48 bytes through the XMM registers. - CMPQ CX, $48 - JGT memmove_lit - - // if len(dst[di:]) < 48 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $48 - JLT memmove_lit - - // if len(src[si:]) < 48 - MOVQ R9, BX - SUBQ SI, BX - CMPQ BX, $48 - JLT memmove_lit - - MOVOU (SI), X0 - MOVOU 16(SI), X1 - MOVOU 32(SI), X2 - MOVOU X0, (DI) - MOVOU X1, 16(DI) - MOVOU X2, 32(DI) - - ADDQ CX, SI - ADDQ CX, DI - - JMP finish_lit_copy - -memmove_lit: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - - // Spill registers. Increment SI, DI now so we don't need to save CX. - ADDQ CX, DI - ADDQ CX, SI - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVL DX, 40(SP) - - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVL 40(SP), DX - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ dict_base+48(FP), R14 - MOVQ dict_len+56(FP), R15 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - -finish_lit_copy: - // CX := mLen - // free up DX to use for offset - MOVL DX, CX - ANDL $0xF, CX - - CMPQ SI, R9 - JAE end - - // offset - // si += 2 - // DX := int(src[si-2]) | int(src[si-1])<<8 - ADDQ $2, SI - JC err_short_buf - CMPQ SI, R9 - JA err_short_buf - MOVWQZX -2(SI), DX - - // 0 offset is invalid - TESTL DX, DX - JEQ err_corrupt - -match_len_loop_pre: - // if mlen != 0xF - CMPB CX, $0xF - JNE copy_match - - // do { BX = src[si++]; mlen += BX } while (BX == 0xFF). 
-match_len_loop: - CMPQ SI, R9 - JAE err_short_buf - - MOVBLZX (SI), BX - INCQ SI - ADDQ BX, CX - - CMPB BX, $0xFF - JE match_len_loop - -copy_match: - ADDQ $const_minMatch, CX - - // check we have match_len bytes left in dst - // di+match_len < len(dst) - MOVQ DI, AX - ADDQ CX, AX - JC err_short_buf - CMPQ AX, R8 - JA err_short_buf - - // DX = offset - // CX = match_len - // BX = &dst + (di - offset) - MOVQ DI, BX - SUBQ DX, BX - - // check BX is within dst - // if BX < &dst - JC copy_match_from_dict - CMPQ BX, R11 - JBE copy_match_from_dict - - // if offset + match_len < di - LEAQ (BX)(CX*1), AX - CMPQ DI, AX - JA copy_interior_match - - // AX := len(dst[:di]) - // MOVQ DI, AX - // SUBQ R11, AX - - // copy 16 bytes at a time - // if di-offset < 16 copy 16-(di-offset) bytes to di - // then do the remaining - -copy_match_loop: - // for match_len >= 0 - // dst[di] = dst[i] - // di++ - // i++ - MOVB (BX), AX - MOVB AX, (DI) - INCQ DI - INCQ BX - DECQ CX - JNZ copy_match_loop - - JMP loopcheck - -copy_interior_match: - CMPQ CX, $16 - JGT memmove_match - - // if len(dst[di:]) < 16 - MOVQ R8, AX - SUBQ DI, AX - CMPQ AX, $16 - JLT memmove_match - - MOVOU (BX), X0 - MOVOU X0, (DI) - - ADDQ CX, DI - XORL CX, CX - JMP loopcheck - -copy_match_from_dict: - // CX = match_len - // BX = &dst + (di - offset) - - // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary - MOVQ R11, AX - SUBQ BX, AX - - // BX = len(dict) - dict_bytes_available - MOVQ R15, BX - SUBQ AX, BX - JS err_short_dict - - ADDQ R14, BX - - // if match_len > dict_bytes_available, match fits entirely within external dictionary : just copy - CMPQ CX, AX - JLT memmove_match - - // The match stretches over the dictionary and our block - // 1) copy what comes from the dictionary - // AX = dict_bytes_available = copy_size - // BX = &dict_end - copy_size - // CX = match_len - - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ BX, 8(SP) - MOVQ AX, 16(SP) - // store extra stuff we want to recover - // spill - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // restore registers - MOVQ 16(SP), AX // copy_size - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX // match_len - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 // TODO: make these sensible numbers - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ dict_base+48(FP), R14 - MOVQ dict_len+56(FP), R15 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - - // di+=copy_size - ADDQ AX, DI - - // 2) copy the rest from the current block - // CX = match_len - copy_size = rest_size - SUBQ AX, CX - MOVQ R11, BX - - // check if we have a copy overlap - // AX = &dst + rest_size - MOVQ CX, AX - ADDQ BX, AX - // if &dst + rest_size > di, copy byte by byte - CMPQ AX, DI - - JA copy_match_loop - -memmove_match: - // memmove(to, from, len) - MOVQ DI, 0(SP) - MOVQ BX, 8(SP) - MOVQ CX, 16(SP) - - // Spill registers. Increment DI now so we don't need to save CX. 
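// runtime·memmove takes its three arguments on the stack at 0(SP), 8(SP) and
// 16(SP) and may clobber any general-purpose register, so live values are
// spilled to the frame and the derived pointers are recomputed from the FP
// arguments after the CALL.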
- ADDQ CX, DI - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - - CALL runtime·memmove(SB) - - // restore registers - MOVQ 24(SP), DI - MOVQ 32(SP), SI - - // recalc initial values - MOVQ dst_base+0(FP), R8 - MOVQ R8, R11 // TODO: make these sensible numbers - ADDQ dst_len+8(FP), R8 - MOVQ src_base+24(FP), R9 - ADDQ src_len+32(FP), R9 - MOVQ R8, R12 - SUBQ $32, R12 - MOVQ R9, R13 - SUBQ $16, R13 - MOVQ dict_base+48(FP), R14 - MOVQ dict_len+56(FP), R15 - XORL CX, CX - -loopcheck: - // for si < len(src) - CMPQ SI, R9 - JB loop - -end: - // Remaining length must be zero. - TESTQ CX, CX - JNE err_corrupt - - SUBQ R11, DI - MOVQ DI, ret+72(FP) - RET - -err_corrupt: - MOVQ $-1, ret+72(FP) - RET - -err_short_buf: - MOVQ $-2, ret+72(FP) - RET - -err_short_dict: - MOVQ $-3, ret+72(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s deleted file mode 100644 index 20b21fcf1..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s +++ /dev/null @@ -1,231 +0,0 @@ -// +build gc -// +build !noasm - -#include "go_asm.h" -#include "textflag.h" - -// Register allocation. -#define dst R0 -#define dstorig R1 -#define src R2 -#define dstend R3 -#define srcend R4 -#define match R5 // Match address. -#define dictend R6 -#define token R7 -#define len R8 // Literal and match lengths. -#define offset R7 // Match offset; overlaps with token. -#define tmp1 R9 -#define tmp2 R11 -#define tmp3 R12 - -// func decodeBlock(dst, src, dict []byte) int -TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40 - MOVW dst_base +0(FP), dst - MOVW dst_len +4(FP), dstend - MOVW src_base +12(FP), src - MOVW src_len +16(FP), srcend - - CMP $0, srcend - BEQ shortSrc - - ADD dst, dstend - ADD src, srcend - - MOVW dst, dstorig - -loop: - // Read token. Extract literal length. - MOVBU.P 1(src), token - MOVW token >> 4, len - CMP $15, len - BNE readLitlenDone - -readLitlenLoop: - CMP src, srcend - BEQ shortSrc - MOVBU.P 1(src), tmp1 - ADD.S tmp1, len - BVS shortDst - CMP $255, tmp1 - BEQ readLitlenLoop - -readLitlenDone: - CMP $0, len - BEQ copyLiteralDone - - // Bounds check dst+len and src+len. - ADD.S dst, len, tmp1 - ADD.CC.S src, len, tmp2 - BCS shortSrc - CMP dstend, tmp1 - //BHI shortDst // Uncomment for distinct error codes. - CMP.LS srcend, tmp2 - BHI shortSrc - - // Copy literal. - CMP $4, len - BLO copyLiteralFinish - - // Copy 0-3 bytes until src is aligned. - TST $1, src - MOVBU.NE.P 1(src), tmp1 - MOVB.NE.P tmp1, 1(dst) - SUB.NE $1, len - - TST $2, src - MOVHU.NE.P 2(src), tmp2 - MOVB.NE.P tmp2, 1(dst) - MOVW.NE tmp2 >> 8, tmp1 - MOVB.NE.P tmp1, 1(dst) - SUB.NE $2, len - - B copyLiteralLoopCond - -copyLiteralLoop: - // Aligned load, unaligned write. - MOVW.P 4(src), tmp1 - MOVW tmp1 >> 8, tmp2 - MOVB tmp2, 1(dst) - MOVW tmp1 >> 16, tmp3 - MOVB tmp3, 2(dst) - MOVW tmp1 >> 24, tmp2 - MOVB tmp2, 3(dst) - MOVB.P tmp1, 4(dst) -copyLiteralLoopCond: - // Loop until len-4 < 0. - SUB.S $4, len - BPL copyLiteralLoop - -copyLiteralFinish: - // Copy remaining 0-3 bytes. - // At this point, len may be < 0, but len&3 is still accurate. - TST $1, len - MOVB.NE.P 1(src), tmp3 - MOVB.NE.P tmp3, 1(dst) - TST $2, len - MOVB.NE.P 2(src), tmp1 - MOVB.NE.P tmp1, 2(dst) - MOVB.NE -1(src), tmp2 - MOVB.NE tmp2, -1(dst) - -copyLiteralDone: - // Initial part of match length. - // This frees up the token register for reuse as offset. - AND $15, token, len - - CMP src, srcend - BEQ end - - // Read offset. 
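// The two offset bytes are loaded individually and combined little-endian
// (low | high<<8); ORR.S sets the flags, so a zero offset branches to the
// corrupt label via BEQ.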
- ADD.S $2, src - BCS shortSrc - CMP srcend, src - BHI shortSrc - MOVBU -2(src), offset - MOVBU -1(src), tmp1 - ORR.S tmp1 << 8, offset - BEQ corrupt - - // Read rest of match length. - CMP $15, len - BNE readMatchlenDone - -readMatchlenLoop: - CMP src, srcend - BEQ shortSrc - MOVBU.P 1(src), tmp1 - ADD.S tmp1, len - BVS shortDst - CMP $255, tmp1 - BEQ readMatchlenLoop - -readMatchlenDone: - // Bounds check dst+len+minMatch. - ADD.S dst, len, tmp1 - ADD.CC.S $const_minMatch, tmp1 - BCS shortDst - CMP dstend, tmp1 - BHI shortDst - - RSB dst, offset, match - CMP dstorig, match - BGE copyMatch4 - - // match < dstorig means the match starts in the dictionary, - // at len(dict) - offset + (dst - dstorig). - MOVW dict_base+24(FP), match - MOVW dict_len +28(FP), dictend - - ADD $const_minMatch, len - - RSB dst, dstorig, tmp1 - RSB dictend, offset, tmp2 - ADD.S tmp2, tmp1 - BMI shortDict - ADD match, dictend - ADD tmp1, match - -copyDict: - MOVBU.P 1(match), tmp1 - MOVB.P tmp1, 1(dst) - SUB.S $1, len - CMP.NE match, dictend - BNE copyDict - - // If the match extends beyond the dictionary, the rest is at dstorig. - CMP $0, len - BEQ copyMatchDone - MOVW dstorig, match - B copyMatch - - // Copy a regular match. - // Since len+minMatch is at least four, we can do a 4× unrolled - // byte copy loop. Using MOVW instead of four byte loads is faster, - // but to remain portable we'd have to align match first, which is - // too expensive. By alternating loads and stores, we also handle - // the case offset < 4. -copyMatch4: - SUB.S $4, len - MOVBU.P 4(match), tmp1 - MOVB.P tmp1, 4(dst) - MOVBU -3(match), tmp2 - MOVB tmp2, -3(dst) - MOVBU -2(match), tmp3 - MOVB tmp3, -2(dst) - MOVBU -1(match), tmp1 - MOVB tmp1, -1(dst) - BPL copyMatch4 - - // Restore len, which is now negative. - ADD.S $4, len - BEQ copyMatchDone - -copyMatch: - // Finish with a byte-at-a-time copy. - SUB.S $1, len - MOVBU.P 1(match), tmp2 - MOVB.P tmp2, 1(dst) - BNE copyMatch - -copyMatchDone: - CMP src, srcend - BNE loop - -end: - CMP $0, len - BNE corrupt - SUB dstorig, dst, tmp1 - MOVW tmp1, ret+36(FP) - RET - - // The error cases have distinct labels so we can put different - // return codes here when debugging, or if the error returns need to - // be changed. -shortDict: -shortDst: -shortSrc: -corrupt: - MOVW $-1, tmp1 - MOVW tmp1, ret+36(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s deleted file mode 100644 index c43e8a8d2..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ /dev/null @@ -1,230 +0,0 @@ -// +build gc -// +build !noasm - -// This implementation assumes that strict alignment checking is turned off. -// The Go compiler makes the same assumption. - -#include "go_asm.h" -#include "textflag.h" - -// Register allocation. -#define dst R0 -#define dstorig R1 -#define src R2 -#define dstend R3 -#define dstend16 R4 // dstend - 16 -#define srcend R5 -#define srcend16 R6 // srcend - 16 -#define match R7 // Match address. -#define dict R8 -#define dictlen R9 -#define dictend R10 -#define token R11 -#define len R12 // Literal and match lengths. -#define lenRem R13 -#define offset R14 // Match offset. 
-#define tmp1 R15 -#define tmp2 R16 -#define tmp3 R17 -#define tmp4 R19 - -// func decodeBlock(dst, src, dict []byte) int -TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80 - LDP dst_base+0(FP), (dst, dstend) - ADD dst, dstend - MOVD dst, dstorig - - LDP src_base+24(FP), (src, srcend) - CBZ srcend, shortSrc - ADD src, srcend - - // dstend16 = max(dstend-16, 0) and similarly for srcend16. - SUBS $16, dstend, dstend16 - CSEL LO, ZR, dstend16, dstend16 - SUBS $16, srcend, srcend16 - CSEL LO, ZR, srcend16, srcend16 - - LDP dict_base+48(FP), (dict, dictlen) - ADD dict, dictlen, dictend - -loop: - // Read token. Extract literal length. - MOVBU.P 1(src), token - LSR $4, token, len - CMP $15, len - BNE readLitlenDone - -readLitlenLoop: - CMP src, srcend - BEQ shortSrc - MOVBU.P 1(src), tmp1 - ADDS tmp1, len - BVS shortDst - CMP $255, tmp1 - BEQ readLitlenLoop - -readLitlenDone: - CBZ len, copyLiteralDone - - // Bounds check dst+len and src+len. - ADDS dst, len, tmp1 - BCS shortSrc - ADDS src, len, tmp2 - BCS shortSrc - CMP dstend, tmp1 - BHI shortDst - CMP srcend, tmp2 - BHI shortSrc - - // Copy literal. - SUBS $16, len - BLO copyLiteralShort - -copyLiteralLoop: - LDP.P 16(src), (tmp1, tmp2) - STP.P (tmp1, tmp2), 16(dst) - SUBS $16, len - BPL copyLiteralLoop - - // Copy (final part of) literal of length 0-15. - // If we have >=16 bytes left in src and dst, just copy 16 bytes. -copyLiteralShort: - CMP dstend16, dst - CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO). - BHS copyLiteralShortEnd - - AND $15, len - - LDP (src), (tmp1, tmp2) - ADD len, src - STP (tmp1, tmp2), (dst) - ADD len, dst - - B copyLiteralDone - - // Safe but slow copy near the end of src, dst. -copyLiteralShortEnd: - TBZ $3, len, 3(PC) - MOVD.P 8(src), tmp1 - MOVD.P tmp1, 8(dst) - TBZ $2, len, 3(PC) - MOVW.P 4(src), tmp2 - MOVW.P tmp2, 4(dst) - TBZ $1, len, 3(PC) - MOVH.P 2(src), tmp3 - MOVH.P tmp3, 2(dst) - TBZ $0, len, 3(PC) - MOVBU.P 1(src), tmp4 - MOVB.P tmp4, 1(dst) - -copyLiteralDone: - // Initial part of match length. - AND $15, token, len - - CMP src, srcend - BEQ end - - // Read offset. - ADDS $2, src - BCS shortSrc - CMP srcend, src - BHI shortSrc - MOVHU -2(src), offset - CBZ offset, corrupt - - // Read rest of match length. - CMP $15, len - BNE readMatchlenDone - -readMatchlenLoop: - CMP src, srcend - BEQ shortSrc - MOVBU.P 1(src), tmp1 - ADDS tmp1, len - BVS shortDst - CMP $255, tmp1 - BEQ readMatchlenLoop - -readMatchlenDone: - ADD $const_minMatch, len - - // Bounds check dst+len. - ADDS dst, len, tmp2 - BCS shortDst - CMP dstend, tmp2 - BHI shortDst - - SUB offset, dst, match - CMP dstorig, match - BHS copyMatchTry8 - - // match < dstorig means the match starts in the dictionary, - // at len(dict) - offset + (dst - dstorig). - SUB dstorig, dst, tmp1 - SUB offset, dictlen, tmp2 - ADDS tmp2, tmp1 - BMI shortDict - ADD dict, tmp1, match - -copyDict: - MOVBU.P 1(match), tmp3 - MOVB.P tmp3, 1(dst) - SUBS $1, len - CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag. - BNE copyDict - - CBZ len, copyMatchDone - - // If the match extends beyond the dictionary, the rest is at dstorig. - // Recompute the offset for the next check. - MOVD dstorig, match - SUB dstorig, dst, offset - -copyMatchTry8: - // Copy doublewords if both len and offset are at least eight. - // A 16-at-a-time loop doesn't provide a further speedup. 
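// The doubleword loop below is entered only when both len and offset are at
// least 8; with a smaller offset each 8-byte load would overlap bytes the
// previous store just wrote, so those cases take the byte-at-a-time loop
// instead.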
- CMP $8, len - CCMP HS, offset, $8, $0 - BLO copyMatchLoop1 - - AND $7, len, lenRem - SUB $8, len -copyMatchLoop8: - MOVD.P 8(match), tmp1 - MOVD.P tmp1, 8(dst) - SUBS $8, len - BPL copyMatchLoop8 - - MOVD (match)(len), tmp2 // match+len == match+lenRem-8. - ADD lenRem, dst - MOVD $0, len - MOVD tmp2, -8(dst) - B copyMatchDone - -copyMatchLoop1: - // Byte-at-a-time copy for small offsets. - MOVBU.P 1(match), tmp2 - MOVB.P tmp2, 1(dst) - SUBS $1, len - BNE copyMatchLoop1 - -copyMatchDone: - CMP src, srcend - BNE loop - -end: - CBNZ len, corrupt - SUB dstorig, dst, tmp1 - MOVD tmp1, ret+72(FP) - RET - - // The error cases have distinct labels so we can put different - // return codes here when debugging, or if the error returns need to - // be changed. -shortDict: -shortDst: -shortSrc: -corrupt: - MOVD $-1, tmp1 - MOVD tmp1, ret+72(FP) - RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go deleted file mode 100644 index 8d9023d10..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm -// +build amd64 arm arm64 -// +build !appengine -// +build gc -// +build !noasm - -package lz4block - -//go:noescape -func decodeBlock(dst, src, dict []byte) int diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go deleted file mode 100644 index 9f568fbb1..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go +++ /dev/null @@ -1,139 +0,0 @@ -//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm -// +build !amd64,!arm,!arm64 appengine !gc noasm - -package lz4block - -import ( - "encoding/binary" -) - -func decodeBlock(dst, src, dict []byte) (ret int) { - // Restrict capacities so we don't read or write out of bounds. - dst = dst[:len(dst):len(dst)] - src = src[:len(src):len(src)] - - const hasError = -2 - - if len(src) == 0 { - return hasError - } - - defer func() { - if recover() != nil { - ret = hasError - } - }() - - var si, di uint - for si < uint(len(src)) { - // Literals and match lengths (token). - b := uint(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - switch { - case lLen < 0xF && si+16 < uint(len(src)): - // Shortcut 1 - // if we have enough room in src and dst, and the literals length - // is small enough (0..14) then copy all 16 bytes, even if not all - // are part of the literals. - copy(dst[di:], src[si:si+16]) - si += lLen - di += lLen - if mLen := b & 0xF; mLen < 0xF { - // Shortcut 2 - // if the match length (4..18) fits within the literals, then copy - // all 18 bytes, even if not all are part of the literals. - mLen += 4 - if offset := u16(src[si:]); mLen <= offset && offset < di { - i := di - offset - // The remaining buffer may not hold 18 bytes. - // See https://github.com/pierrec/lz4/issues/51. 
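// The shortcut copies a fixed 18 bytes even when the match is shorter; di
// only advances by mLen, so the overrun stays inside dst and the
// end <= len(dst) check below keeps it in bounds.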
- if end := i + 18; end <= uint(len(dst)) { - copy(dst[di:], dst[i:end]) - si += 2 - di += mLen - continue - } - } - } - case lLen == 0xF: - for { - x := uint(src[si]) - if lLen += x; int(lLen) < 0 { - return hasError - } - si++ - if x != 0xFF { - break - } - } - fallthrough - default: - copy(dst[di:di+lLen], src[si:si+lLen]) - si += lLen - di += lLen - } - } - - mLen := b & 0xF - if si == uint(len(src)) && mLen == 0 { - break - } else if si >= uint(len(src)) { - return hasError - } - - offset := u16(src[si:]) - if offset == 0 { - return hasError - } - si += 2 - - // Match. - mLen += minMatch - if mLen == minMatch+0xF { - for { - x := uint(src[si]) - if mLen += x; int(mLen) < 0 { - return hasError - } - si++ - if x != 0xFF { - break - } - } - } - - // Copy the match. - if di < offset { - // The match is beyond our block, meaning the first part - // is in the dictionary. - fromDict := dict[uint(len(dict))+di-offset:] - n := uint(copy(dst[di:di+mLen], fromDict)) - di += n - if mLen -= n; mLen == 0 { - continue - } - // We copied n = offset-di bytes from the dictionary, - // then set di = di+n = offset, so the following code - // copies from dst[di-offset:] = dst[0:]. - } - - expanded := dst[di-offset:] - if mLen > offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. - bytesToCopy := offset * (mLen / offset) - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += uint(copy(dst[di:di+mLen], expanded[:mLen])) - } - - return int(di) -} - -func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) } diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go deleted file mode 100644 index 710ea4281..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package lz4errors - -type Error string - -func (e Error) Error() string { return string(e) } - -const ( - ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short" - ErrInvalidFrame Error = "lz4: bad magic number" - ErrInternalUnhandledState Error = "lz4: unhandled state" - ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum" - ErrInvalidBlockChecksum Error = "lz4: invalid block checksum" - ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum" - ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level" - ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object" - ErrOptionInvalidBlockSize Error = "lz4: invalid block size" - ErrOptionNotApplicable Error = "lz4: option not applicable" - ErrWriterNotClosed Error = "lz4: writer not closed" -) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go deleted file mode 100644 index 459086f09..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ /dev/null @@ -1,350 +0,0 @@ -package lz4stream - -import ( - "encoding/binary" - "fmt" - "io" - "sync" - - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" - "github.com/pierrec/lz4/v4/internal/xxh32" -) - -type Blocks struct { - Block *FrameDataBlock - Blocks chan chan *FrameDataBlock - mu sync.Mutex - err error -} - -func (b *Blocks) initW(f *Frame, dst io.Writer, num int) { - if num == 1 { - b.Blocks = nil - b.Block = NewFrameDataBlock(f) - return - } - 
b.Block = nil - if cap(b.Blocks) != num { - b.Blocks = make(chan chan *FrameDataBlock, num) - } - // goroutine managing concurrent block compression goroutines. - go func() { - // Process next block compression item. - for c := range b.Blocks { - // Read the next compressed block result. - // Waiting here ensures that the blocks are output in the order they were sent. - // The incoming channel is always closed as it indicates to the caller that - // the block has been processed. - block := <-c - if block == nil { - // Notify the block compression routine that we are done with its result. - // This is used when a sentinel block is sent to terminate the compression. - close(c) - return - } - // Do not attempt to write the block upon any previous failure. - if b.err == nil { - // Write the block. - if err := block.Write(f, dst); err != nil { - // Keep the first error. - b.err = err - // All pending compression goroutines need to shut down, so we need to keep going. - } - } - close(c) - } - }() -} - -func (b *Blocks) close(f *Frame, num int) error { - if num == 1 { - if b.Block != nil { - b.Block.Close(f) - } - err := b.err - b.err = nil - return err - } - if b.Blocks == nil { - err := b.err - b.err = nil - return err - } - c := make(chan *FrameDataBlock) - b.Blocks <- c - c <- nil - <-c - err := b.err - b.err = nil - return err -} - -// ErrorR returns any error set while uncompressing a stream. -func (b *Blocks) ErrorR() error { - b.mu.Lock() - defer b.mu.Unlock() - return b.err -} - -// initR returns a channel that streams the uncompressed blocks if in concurrent -// mode and no error. When the channel is closed, check for any error with b.ErrorR. -// -// If not in concurrent mode, the uncompressed block is b.Block and the returned error -// needs to be checked. -func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) { - size := f.Descriptor.Flags.BlockSizeIndex() - if num == 1 { - b.Blocks = nil - b.Block = NewFrameDataBlock(f) - return nil, nil - } - b.Block = nil - blocks := make(chan chan []byte, num) - // data receives the uncompressed blocks. - data := make(chan []byte) - // Read blocks from the source sequentially - // and uncompress them concurrently. - - // In legacy mode, accrue the uncompress sizes in cum. - var cum uint32 - go func() { - var cumx uint32 - var err error - for b.ErrorR() == nil { - block := NewFrameDataBlock(f) - cumx, err = block.Read(f, src, 0) - if err != nil { - block.Close(f) - break - } - // Recheck for an error as reading may be slow and uncompressing is expensive. - if b.ErrorR() != nil { - block.Close(f) - break - } - c := make(chan []byte) - blocks <- c - go func() { - defer block.Close(f) - data, err := block.Uncompress(f, size.Get(), nil, false) - if err != nil { - b.closeR(err) - // Close the block channel to indicate an error. - close(c) - } else { - c <- data - } - }() - } - // End the collection loop and the data channel. - c := make(chan []byte) - blocks <- c - c <- nil // signal the collection loop that we are done - <-c // wait for the collect loop to complete - if f.isLegacy() && cum == cumx { - err = io.EOF - } - b.closeR(err) - close(data) - }() - // Collect the uncompressed blocks and make them available - // on the returned channel. - go func(leg bool) { - defer close(blocks) - skipBlocks := false - for c := range blocks { - buf, ok := <-c - if !ok { - // A closed channel indicates an error. - // All remaining channels should be discarded. 
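// The collector keeps draining the remaining per-block channels instead of
// returning early, so decompression goroutines that are still running can
// deliver their results and shut down cleanly.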
- skipBlocks = true - continue - } - if buf == nil { - // Signal to end the loop. - close(c) - return - } - if skipBlocks { - // A previous error has occurred, skipping remaining channels. - continue - } - // Perform checksum now as the blocks are received in order. - if f.Descriptor.Flags.ContentChecksum() { - _, _ = f.checksum.Write(buf) - } - if leg { - cum += uint32(len(buf)) - } - data <- buf - close(c) - } - }(f.isLegacy()) - return data, nil -} - -// closeR safely sets the error on b if not already set. -func (b *Blocks) closeR(err error) { - b.mu.Lock() - if b.err == nil { - b.err = err - } - b.mu.Unlock() -} - -func NewFrameDataBlock(f *Frame) *FrameDataBlock { - buf := f.Descriptor.Flags.BlockSizeIndex().Get() - return &FrameDataBlock{Data: buf, data: buf} -} - -type FrameDataBlock struct { - Size DataBlockSize - Data []byte // compressed or uncompressed data (.data or .src) - Checksum uint32 - data []byte // buffer for compressed data - src []byte // uncompressed data - err error // used in concurrent mode -} - -func (b *FrameDataBlock) Close(f *Frame) { - b.Size = 0 - b.Checksum = 0 - b.err = nil - if b.data != nil { - // Block was not already closed. - lz4block.Put(b.data) - b.Data = nil - b.data = nil - b.src = nil - } -} - -// Block compression errors are ignored since the buffer is sized appropriately. -func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { - data := b.data - if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. - src = src[:8<<20] - } else { - data = data[:len(src)] // trigger the incompressible flag in CompressBlock - } - var n int - switch level { - case lz4block.Fast: - n, _ = lz4block.CompressBlock(src, data) - default: - n, _ = lz4block.CompressBlockHC(src, data, level) - } - if n == 0 { - b.Size.UncompressedSet(true) - b.Data = src - } else { - b.Size.UncompressedSet(false) - b.Data = data[:n] - } - b.Size.sizeSet(len(b.Data)) - b.src = src // keep track of the source for content checksum - - if f.Descriptor.Flags.BlockChecksum() { - b.Checksum = xxh32.ChecksumZero(src) - } - return b -} - -func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error { - // Write is called in the same order as blocks are compressed, - // so content checksum must be done here. - if f.Descriptor.Flags.ContentChecksum() { - _, _ = f.checksum.Write(b.src) - } - buf := f.buf[:] - binary.LittleEndian.PutUint32(buf, uint32(b.Size)) - if _, err := dst.Write(buf[:4]); err != nil { - return err - } - - if _, err := dst.Write(b.Data); err != nil { - return err - } - - if b.Checksum == 0 { - return nil - } - binary.LittleEndian.PutUint32(buf, b.Checksum) - _, err := dst.Write(buf[:4]) - return err -} - -// Read updates b with the next block data, size and checksum if available. -func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) { - x, err := f.readUint32(src) - if err != nil { - return 0, err - } - if f.isLegacy() { - switch x { - case frameMagicLegacy: - // Concatenated legacy frame. - return b.Read(f, src, cum) - case cum: - // Only works in non concurrent mode, for concurrent mode - // it is handled separately. - // Linux kernel format appends the total uncompressed size at the end. - return 0, io.EOF - } - } else if x == 0 { - // Marker for end of stream. 
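// A zero size word is the LZ4 frame format's EndMark terminating the block
// sequence, so the reader surfaces it as io.EOF.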
- return 0, io.EOF - } - b.Size = DataBlockSize(x) - - size := b.Size.size() - if size > cap(b.data) { - return x, lz4errors.ErrOptionInvalidBlockSize - } - b.data = b.data[:size] - if _, err := io.ReadFull(src, b.data); err != nil { - return x, err - } - if f.Descriptor.Flags.BlockChecksum() { - sum, err := f.readUint32(src) - if err != nil { - return 0, err - } - b.Checksum = sum - } - return x, nil -} - -func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) { - if b.Size.Uncompressed() { - n := copy(dst, b.data) - dst = dst[:n] - } else { - n, err := lz4block.UncompressBlock(b.data, dst, dict) - if err != nil { - return nil, err - } - dst = dst[:n] - } - if f.Descriptor.Flags.BlockChecksum() { - if c := xxh32.ChecksumZero(dst); c != b.Checksum { - err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) - return nil, err - } - } - if sum && f.Descriptor.Flags.ContentChecksum() { - _, _ = f.checksum.Write(dst) - } - return dst, nil -} - -func (f *Frame) readUint32(r io.Reader) (x uint32, err error) { - if _, err = io.ReadFull(r, f.buf[:4]); err != nil { - return - } - x = binary.LittleEndian.Uint32(f.buf[:4]) - return -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go deleted file mode 100644 index 18192a943..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go +++ /dev/null @@ -1,204 +0,0 @@ -// Package lz4stream provides the types that support reading and writing LZ4 data streams. -package lz4stream - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" - "github.com/pierrec/lz4/v4/internal/xxh32" -) - -//go:generate go run gen.go - -const ( - frameMagic uint32 = 0x184D2204 - frameSkipMagic uint32 = 0x184D2A50 - frameMagicLegacy uint32 = 0x184C2102 -) - -func NewFrame() *Frame { - return &Frame{} -} - -type Frame struct { - buf [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes - Magic uint32 - Descriptor FrameDescriptor - Blocks Blocks - Checksum uint32 - checksum xxh32.XXHZero -} - -// Reset allows reusing the Frame. -// The Descriptor configuration is not modified. -func (f *Frame) Reset(num int) { - f.Magic = 0 - f.Descriptor.Checksum = 0 - f.Descriptor.ContentSize = 0 - _ = f.Blocks.close(f, num) - f.Checksum = 0 -} - -func (f *Frame) InitW(dst io.Writer, num int, legacy bool) { - if legacy { - f.Magic = frameMagicLegacy - idx := lz4block.Index(lz4block.Block8Mb) - f.Descriptor.Flags.BlockSizeIndexSet(idx) - } else { - f.Magic = frameMagic - f.Descriptor.initW() - } - f.Blocks.initW(f, dst, num) - f.checksum.Reset() -} - -func (f *Frame) CloseW(dst io.Writer, num int) error { - if err := f.Blocks.close(f, num); err != nil { - return err - } - if f.isLegacy() { - return nil - } - buf := f.buf[:0] - // End mark (data block size of uint32(0)). - buf = append(buf, 0, 0, 0, 0) - if f.Descriptor.Flags.ContentChecksum() { - buf = f.checksum.Sum(buf) - } - _, err := dst.Write(buf) - return err -} - -func (f *Frame) isLegacy() bool { - return f.Magic == frameMagicLegacy -} - -func (f *Frame) ParseHeaders(src io.Reader) error { - if f.Magic > 0 { - // Header already read. 
- return nil - } - -newFrame: - var err error - if f.Magic, err = f.readUint32(src); err != nil { - return err - } - switch m := f.Magic; { - case m == frameMagic || m == frameMagicLegacy: - // All 16 values of frameSkipMagic are valid. - case m>>8 == frameSkipMagic>>8: - skip, err := f.readUint32(src) - if err != nil { - return err - } - if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil { - return err - } - goto newFrame - default: - return lz4errors.ErrInvalidFrame - } - if err := f.Descriptor.initR(f, src); err != nil { - return err - } - f.checksum.Reset() - return nil -} - -func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) { - return f.Blocks.initR(f, num, src) -} - -func (f *Frame) CloseR(src io.Reader) (err error) { - if f.isLegacy() { - return nil - } - if !f.Descriptor.Flags.ContentChecksum() { - return nil - } - if f.Checksum, err = f.readUint32(src); err != nil { - return err - } - if c := f.checksum.Sum32(); c != f.Checksum { - return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum) - } - return nil -} - -type FrameDescriptor struct { - Flags DescriptorFlags - ContentSize uint64 - Checksum uint8 -} - -func (fd *FrameDescriptor) initW() { - fd.Flags.VersionSet(1) - fd.Flags.BlockIndependenceSet(true) -} - -func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error { - if fd.Checksum > 0 { - // Header already written. - return nil - } - - buf := f.buf[:4] - // Write the magic number here even though it belongs to the Frame. - binary.LittleEndian.PutUint32(buf, f.Magic) - if !f.isLegacy() { - buf = buf[:4+2] - binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags)) - - if fd.Flags.Size() { - buf = buf[:4+2+8] - binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize) - } - fd.Checksum = descriptorChecksum(buf[4:]) - buf = append(buf, fd.Checksum) - } - - _, err := dst.Write(buf) - return err -} - -func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error { - if f.isLegacy() { - idx := lz4block.Index(lz4block.Block8Mb) - f.Descriptor.Flags.BlockSizeIndexSet(idx) - return nil - } - // Read the flags and the checksum, hoping that there is not content size. - buf := f.buf[:3] - if _, err := io.ReadFull(src, buf); err != nil { - return err - } - descr := binary.LittleEndian.Uint16(buf) - fd.Flags = DescriptorFlags(descr) - if fd.Flags.Size() { - // Append the 8 missing bytes. - buf = buf[:3+8] - if _, err := io.ReadFull(src, buf[3:]); err != nil { - return err - } - fd.ContentSize = binary.LittleEndian.Uint64(buf[2:]) - } - fd.Checksum = buf[len(buf)-1] // the checksum is the last byte - buf = buf[:len(buf)-1] // all descriptor fields except checksum - if c := descriptorChecksum(buf); fd.Checksum != c { - return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum) - } - // Validate the elements that can be. - if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() { - return lz4errors.ErrOptionInvalidBlockSize - } - return nil -} - -func descriptorChecksum(buf []byte) byte { - return byte(xxh32.ChecksumZero(buf) >> 8) -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go deleted file mode 100644 index d33a6be95..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by `gen.exe`. DO NOT EDIT. 
- -package lz4stream - -import "github.com/pierrec/lz4/v4/internal/lz4block" - -// DescriptorFlags is defined as follow: -// field bits -// ----- ---- -// _ 2 -// ContentChecksum 1 -// Size 1 -// BlockChecksum 1 -// BlockIndependence 1 -// Version 2 -// _ 4 -// BlockSizeIndex 3 -// _ 1 -type DescriptorFlags uint16 - -// Getters. -func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 } -func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 } -func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 } -func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 } -func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) } -func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex { - return lz4block.BlockSizeIndex(x >> 12 & 0x7) -} - -// Setters. -func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags { - const b = 1 << 2 - if v { - *x = *x&^b | b - } else { - *x &^= b - } - return x -} -func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags { - const b = 1 << 3 - if v { - *x = *x&^b | b - } else { - *x &^= b - } - return x -} -func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags { - const b = 1 << 4 - if v { - *x = *x&^b | b - } else { - *x &^= b - } - return x -} -func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags { - const b = 1 << 5 - if v { - *x = *x&^b | b - } else { - *x &^= b - } - return x -} -func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags { - *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6) - return x -} -func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags { - *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12) - return x -} - -// Code generated by `gen.exe`. DO NOT EDIT. - -// DataBlockSize is defined as follow: -// field bits -// ----- ---- -// size 31 -// Uncompressed 1 -type DataBlockSize uint32 - -// Getters. -func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) } -func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 } - -// Setters. -func (x *DataBlockSize) sizeSet(v int) *DataBlockSize { - *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF - return x -} -func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize { - const b = 1 << 31 - if v { - *x = *x&^b | b - } else { - *x &^= b - } - return x -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go deleted file mode 100644 index 651d10c10..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go +++ /dev/null @@ -1,212 +0,0 @@ -// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (ported from the reference implementation https://github.com/Cyan4973/xxHash/) -package xxh32 - -import ( - "encoding/binary" -) - -const ( - prime1 uint32 = 2654435761 - prime2 uint32 = 2246822519 - prime3 uint32 = 3266489917 - prime4 uint32 = 668265263 - prime5 uint32 = 374761393 - - primeMask = 0xFFFFFFFF - prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984 - prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535 -) - -// XXHZero represents an xxhash32 object with seed 0. -type XXHZero struct { - v [4]uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. 
-func (xxh XXHZero) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *XXHZero) Reset() { - xxh.v[0] = prime1plus2 - xxh.v[1] = prime2 - xxh.v[2] = 0 - xxh.v[3] = prime1minus - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *XXHZero) Size() int { - return 4 -} - -// BlockSizeIndex gives the minimum number of bytes accepted by Write(). -func (xxh *XXHZero) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. -// It never returns an error. -func (xxh *XXHZero) Write(input []byte) (int, error) { - if xxh.totalLen == 0 { - xxh.Reset() - } - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - var buf *[16]byte - if m != 0 { - // some data left from previous update - buf = &xxh.buf - c := copy(buf[m:], input) - n -= c - input = input[c:] - } - update(&xxh.v, buf, input) - xxh.bufused = copy(xxh.buf[:], input[n-n%16:]) - - return n, nil -} - -// Portable version of update. This updates v by processing all of buf -// (if not nil) and all full 16-byte blocks of input. -func updateGo(v *[4]uint32, buf *[16]byte, input []byte) { - // Causes compiler to work directly from registers instead of stack: - v1, v2, v3, v4 := v[0], v[1], v[2], v[3] - - if buf != nil { - v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1 - } - - for ; len(input) >= 16; input = input[16:] { - sub := input[:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - v[0], v[1], v[2], v[3] = v1, v2, v3, v4 -} - -// Sum32 returns the 32 bits Hash value. -func (xxh *XXHZero) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if h32 >= 16 { - h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3]) - } else { - h32 += prime5 - } - - p := 0 - n := xxh.bufused - buf := xxh.buf - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for ; p < n; p++ { - h32 += uint32(buf[p]) * prime5 - h32 = rol11(h32) * prime1 - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -// Portable version of ChecksumZero. 
-func checksumZeroGo(input []byte) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += prime5 - } else { - v1 := prime1plus2 - v2 := prime2 - v3 := uint32(0) - v4 := prime1minus - p := 0 - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1 - } - input = input[p:] - n -= p - h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - } - - p := 0 - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3 - h32 = rol17(h32) * prime4 - } - for p < n { - h32 += uint32(input[p]) * prime5 - h32 = rol11(h32) * prime1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime2 - h32 ^= h32 >> 13 - h32 *= prime3 - h32 ^= h32 >> 16 - - return h32 -} - -func rol1(u uint32) uint32 { - return u<<1 | u>>31 -} - -func rol7(u uint32) uint32 { - return u<<7 | u>>25 -} - -func rol11(u uint32) uint32 { - return u<<11 | u>>21 -} - -func rol12(u uint32) uint32 { - return u<<12 | u>>20 -} - -func rol13(u uint32) uint32 { - return u<<13 | u>>19 -} - -func rol17(u uint32) uint32 { - return u<<17 | u>>15 -} - -func rol18(u uint32) uint32 { - return u<<18 | u>>14 -} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go deleted file mode 100644 index 0978b2665..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !noasm - -package xxh32 - -// ChecksumZero returns the 32-bit hash of input. -// -//go:noescape -func ChecksumZero(input []byte) uint32 - -//go:noescape -func update(v *[4]uint32, buf *[16]byte, input []byte) diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s deleted file mode 100644 index c18ffd574..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s +++ /dev/null @@ -1,251 +0,0 @@ -// +build !noasm - -#include "go_asm.h" -#include "textflag.h" - -// Register allocation. -#define p R0 -#define n R1 -#define h R2 -#define v1 R2 // Alias for h. -#define v2 R3 -#define v3 R4 -#define v4 R5 -#define x1 R6 -#define x2 R7 -#define x3 R8 -#define x4 R9 - -// We need the primes in registers. The 16-byte loop only uses prime{1,2}. -#define prime1r R11 -#define prime2r R12 -#define prime3r R3 // The rest can alias v{2-4}. -#define prime4r R4 -#define prime5r R5 - -// Update round macros. These read from and increment p. 
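The portable checksumZeroGo above breaks the hash into a 16-byte vector loop, a 4-byte tail loop, a single-byte loop, and a final avalanche. Below is a self-contained sketch of just the short-input path (fewer than 16 bytes), with the primes and mixing steps copied from the deleted file; the expected output assumes the standard XXH32 test vector for "abc" and is worth double-checking against xxhsum.

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	prime1 uint32 = 2654435761
	prime2 uint32 = 2246822519
	prime3 uint32 = 3266489917
	prime4 uint32 = 668265263
	prime5 uint32 = 374761393
)

func rol(u uint32, n uint) uint32 { return u<<n | u>>(32-n) }

// checksumSmall is the n < 16 branch of the deleted checksumZeroGo.
func checksumSmall(input []byte) uint32 {
	n := len(input)
	h := uint32(n) + prime5
	p := 0
	for ; p+4 <= n; p += 4 { // 4-byte tail loop
		h += binary.LittleEndian.Uint32(input[p:]) * prime3
		h = rol(h, 17) * prime4
	}
	for ; p < n; p++ { // single-byte loop
		h += uint32(input[p]) * prime5
		h = rol(h, 11) * prime1
	}
	h ^= h >> 15 // final avalanche
	h *= prime2
	h ^= h >> 13
	h *= prime3
	h ^= h >> 16
	return h
}

func main() {
	fmt.Printf("%08x\n", checksumSmall([]byte("abc"))) // 32d153ff per the reference vectors
}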
- -#define round16aligned \ - MOVM.IA.W (p), [x1, x2, x3, x4] \ - \ - MULA x1, prime2r, v1, v1 \ - MULA x2, prime2r, v2, v2 \ - MULA x3, prime2r, v3, v3 \ - MULA x4, prime2r, v4, v4 \ - \ - MOVW v1 @> 19, v1 \ - MOVW v2 @> 19, v2 \ - MOVW v3 @> 19, v3 \ - MOVW v4 @> 19, v4 \ - \ - MUL prime1r, v1 \ - MUL prime1r, v2 \ - MUL prime1r, v3 \ - MUL prime1r, v4 \ - -#define round16unaligned \ - MOVBU.P 16(p), x1 \ - MOVBU -15(p), x2 \ - ORR x2 << 8, x1 \ - MOVBU -14(p), x3 \ - MOVBU -13(p), x4 \ - ORR x4 << 8, x3 \ - ORR x3 << 16, x1 \ - \ - MULA x1, prime2r, v1, v1 \ - MOVW v1 @> 19, v1 \ - MUL prime1r, v1 \ - \ - MOVBU -12(p), x1 \ - MOVBU -11(p), x2 \ - ORR x2 << 8, x1 \ - MOVBU -10(p), x3 \ - MOVBU -9(p), x4 \ - ORR x4 << 8, x3 \ - ORR x3 << 16, x1 \ - \ - MULA x1, prime2r, v2, v2 \ - MOVW v2 @> 19, v2 \ - MUL prime1r, v2 \ - \ - MOVBU -8(p), x1 \ - MOVBU -7(p), x2 \ - ORR x2 << 8, x1 \ - MOVBU -6(p), x3 \ - MOVBU -5(p), x4 \ - ORR x4 << 8, x3 \ - ORR x3 << 16, x1 \ - \ - MULA x1, prime2r, v3, v3 \ - MOVW v3 @> 19, v3 \ - MUL prime1r, v3 \ - \ - MOVBU -4(p), x1 \ - MOVBU -3(p), x2 \ - ORR x2 << 8, x1 \ - MOVBU -2(p), x3 \ - MOVBU -1(p), x4 \ - ORR x4 << 8, x3 \ - ORR x3 << 16, x1 \ - \ - MULA x1, prime2r, v4, v4 \ - MOVW v4 @> 19, v4 \ - MUL prime1r, v4 \ - - -// func ChecksumZero([]byte) uint32 -TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16 - MOVW input_base+0(FP), p - MOVW input_len+4(FP), n - - MOVW $const_prime1, prime1r - MOVW $const_prime2, prime2r - - // Set up h for n < 16. It's tempting to say {ADD prime5, n, h} - // here, but that's a pseudo-op that generates a load through R11. - MOVW $const_prime5, prime5r - ADD prime5r, n, h - CMP $0, n - BEQ end - - // We let n go negative so we can do comparisons with SUB.S - // instead of separate CMP. - SUB.S $16, n - BMI loop16done - - ADD prime1r, prime2r, v1 - MOVW prime2r, v2 - MOVW $0, v3 - RSB $0, prime1r, v4 - - TST $3, p - BNE loop16unaligned - -loop16aligned: - SUB.S $16, n - round16aligned - BPL loop16aligned - B loop16finish - -loop16unaligned: - SUB.S $16, n - round16unaligned - BPL loop16unaligned - -loop16finish: - MOVW v1 @> 31, h - ADD v2 @> 25, h - ADD v3 @> 20, h - ADD v4 @> 14, h - - // h += len(input) with v2 as temporary. - MOVW input_len+4(FP), v2 - ADD v2, h - -loop16done: - ADD $16, n // Restore number of bytes left. - - SUB.S $4, n - MOVW $const_prime3, prime3r - BMI loop4done - MOVW $const_prime4, prime4r - - TST $3, p - BNE loop4unaligned - -loop4aligned: - SUB.S $4, n - - MOVW.P 4(p), x1 - MULA prime3r, x1, h, h - MOVW h @> 15, h - MUL prime4r, h - - BPL loop4aligned - B loop4done - -loop4unaligned: - SUB.S $4, n - - MOVBU.P 4(p), x1 - MOVBU -3(p), x2 - ORR x2 << 8, x1 - MOVBU -2(p), x3 - ORR x3 << 16, x1 - MOVBU -1(p), x4 - ORR x4 << 24, x1 - - MULA prime3r, x1, h, h - MOVW h @> 15, h - MUL prime4r, h - - BPL loop4unaligned - -loop4done: - ADD.S $4, n // Restore number of bytes left. - BEQ end - - MOVW $const_prime5, prime5r - -loop1: - SUB.S $1, n - - MOVBU.P 1(p), x1 - MULA prime5r, x1, h, h - MOVW h @> 21, h - MUL prime1r, h - - BNE loop1 - -end: - MOVW $const_prime3, prime3r - EOR h >> 15, h - MUL prime2r, h - EOR h >> 13, h - MUL prime3r, h - EOR h >> 16, h - - MOVW h, ret+12(FP) - RET - - -// func update(v *[4]uint64, buf *[16]byte, p []byte) -TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20 - MOVW v+0(FP), p - MOVM.IA (p), [v1, v2, v3, v4] - - MOVW $const_prime1, prime1r - MOVW $const_prime2, prime2r - - // Process buf, if not nil. 
- MOVW buf+4(FP), p - CMP $0, p - BEQ noBuffered - - round16aligned - -noBuffered: - MOVW input_base +8(FP), p - MOVW input_len +12(FP), n - - SUB.S $16, n - BMI end - - TST $3, p - BNE loop16unaligned - -loop16aligned: - SUB.S $16, n - round16aligned - BPL loop16aligned - B end - -loop16unaligned: - SUB.S $16, n - round16unaligned - BPL loop16unaligned - -end: - MOVW v+0(FP), p - MOVM.IA [v1, v2, v3, v4], (p) - RET diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go deleted file mode 100644 index c96b59b8c..000000000 --- a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !arm noasm - -package xxh32 - -// ChecksumZero returns the 32-bit hash of input. -func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) } - -func update(v *[4]uint32, buf *[16]byte, input []byte) { - updateGo(v, buf, input) -} diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go deleted file mode 100644 index a62022e08..000000000 --- a/vendor/github.com/pierrec/lz4/v4/lz4.go +++ /dev/null @@ -1,157 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data. -// -// The package supports both the LZ4 stream format, -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html, -// and the LZ4 block format, defined at -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html. -// -// See https://github.com/lz4/lz4 for the reference C implementation. -package lz4 - -import ( - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" -) - -func _() { - // Safety checks for duplicated elements. - var x [1]struct{} - _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast] - _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)] - _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)] - _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)] - _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)] -} - -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -func CompressBlockBound(n int) int { - return lz4block.CompressBlockBound(n) -} - -// UncompressBlock uncompresses the source buffer into the destination one, -// and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (int, error) { - return lz4block.UncompressBlock(src, dst, nil) -} - -// UncompressBlockWithDict uncompresses the source buffer into the destination one using a -// dictionary, and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlockWithDict(src, dst, dict []byte) (int, error) { - return lz4block.UncompressBlock(src, dst, dict) -} - -// A Compressor compresses data into the LZ4 block format. -// It uses a fast compression algorithm. -// -// A Compressor is not safe for concurrent use by multiple goroutines. -// -// Use a Writer to compress into the LZ4 stream format. -type Compressor struct{ c lz4block.Compressor } - -// CompressBlock compresses the source buffer src into the destination dst. -// -// If compression is successful, the first return value is the size of the -// compressed data, which is always >0. 
-// -// If dst has length at least CompressBlockBound(len(src)), compression always -// succeeds. Otherwise, the first return value is zero. The error return is -// non-nil if the compressed data does not fit in dst, but it might fit in a -// larger buffer that is still smaller than CompressBlockBound(len(src)). The -// return value (0, nil) means the data is likely incompressible and a buffer -// of length CompressBlockBound(len(src)) should be passed in. -func (c *Compressor) CompressBlock(src, dst []byte) (int, error) { - return c.c.CompressBlock(src, dst) -} - -// CompressBlock compresses the source buffer into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// -// The argument hashTable is scratch space for a hash table used by the -// compressor. If provided, it should have length at least 1<<16. If it is -// shorter (or nil), CompressBlock allocates its own hash table. -// -// The size of the compressed data is returned. -// -// If the destination buffer size is lower than CompressBlockBound and -// the compressed size is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. - -// CompressBlock is equivalent to Compressor.CompressBlock. -// The final argument is ignored and should be set to nil. -// -// This function is deprecated. Use a Compressor instead. -func CompressBlock(src, dst []byte, _ []int) (int, error) { - return lz4block.CompressBlock(src, dst) -} - -// A CompressorHC compresses data into the LZ4 block format. -// Its compression ratio is potentially better than that of a Compressor, -// but it is also slower and requires more memory. -// -// A Compressor is not safe for concurrent use by multiple goroutines. -// -// Use a Writer to compress into the LZ4 stream format. -type CompressorHC struct { - // Level is the maximum search depth for compression. - // Values <= 0 mean no maximum. - Level CompressionLevel - c lz4block.CompressorHC -} - -// CompressBlock compresses the source buffer src into the destination dst. -// -// If compression is successful, the first return value is the size of the -// compressed data, which is always >0. -// -// If dst has length at least CompressBlockBound(len(src)), compression always -// succeeds. Otherwise, the first return value is zero. The error return is -// non-nil if the compressed data does not fit in dst, but it might fit in a -// larger buffer that is still smaller than CompressBlockBound(len(src)). The -// return value (0, nil) means the data is likely incompressible and a buffer -// of length CompressBlockBound(len(src)) should be passed in. -func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) { - return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level)) -} - -// CompressBlockHC is equivalent to CompressorHC.CompressBlock. -// The final two arguments are ignored and should be set to nil. -// -// This function is deprecated. Use a CompressorHC instead. -func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) { - return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth)) -} - -const ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. - ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer - // ErrInvalidFrame is returned when reading an invalid LZ4 archive. 
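The contract spelled out above (size dst with CompressBlockBound so compression always succeeds, and treat a (0, nil) return as incompressible data) looks like this in practice. A minimal sketch against the public package that this diff drops from the vendor tree:

package main

import (
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := []byte("hello hello hello hello hello hello")

	var c lz4.Compressor // zero value is ready to use; not safe for concurrent use
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}
	if n == 0 {
		// Likely incompressible: store src as-is.
		fmt.Println("incompressible")
		return
	}

	// Block decompression: the caller must know (or bound) the original size.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(m == len(src), string(out[:m]))
}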
- ErrInvalidFrame = lz4errors.ErrInvalidFrame - // ErrInternalUnhandledState is an internal error. - ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState - // ErrInvalidHeaderChecksum is returned when reading a frame. - ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum - // ErrInvalidBlockChecksum is returned when reading a frame. - ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum - // ErrInvalidFrameChecksum is returned when reading a frame. - ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum - // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid. - ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel - // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object. - ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError - // ErrOptionInvalidBlockSize is returned when - ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize - // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it. - ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable - // ErrWriterNotClosed is returned when attempting to reset an unclosed writer. - ErrWriterNotClosed = lz4errors.ErrWriterNotClosed -) diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go deleted file mode 100644 index 46a873803..000000000 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ /dev/null @@ -1,214 +0,0 @@ -package lz4 - -import ( - "fmt" - "reflect" - "runtime" - - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go - -type ( - applier interface { - Apply(...Option) error - private() - } - // Option defines the parameters to setup an LZ4 Writer or Reader. - Option func(applier) error -) - -// String returns a string representation of the option with its parameter(s). -func (o Option) String() string { - return o(nil).Error() -} - -// Default options. -var ( - DefaultBlockSizeOption = BlockSizeOption(Block4Mb) - DefaultChecksumOption = ChecksumOption(true) - DefaultConcurrency = ConcurrencyOption(1) - defaultOnBlockDone = OnBlockDoneOption(nil) -) - -const ( - Block64Kb BlockSize = 1 << (16 + iota*2) - Block256Kb - Block1Mb - Block4Mb -) - -// BlockSizeIndex defines the size of the blocks to be compressed. -type BlockSize uint32 - -// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb). -func BlockSizeOption(size BlockSize) Option { - return func(a applier) error { - switch w := a.(type) { - case nil: - s := fmt.Sprintf("BlockSizeOption(%s)", size) - return lz4errors.Error(s) - case *Writer: - size := uint32(size) - if !lz4block.IsValid(size) { - return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) - } - w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// BlockChecksumOption enables or disables block checksum (default=false). 
-func BlockChecksumOption(flag bool) Option { - return func(a applier) error { - switch w := a.(type) { - case nil: - s := fmt.Sprintf("BlockChecksumOption(%v)", flag) - return lz4errors.Error(s) - case *Writer: - w.frame.Descriptor.Flags.BlockChecksumSet(flag) - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// ChecksumOption enables/disables all blocks or content checksum (default=true). -func ChecksumOption(flag bool) Option { - return func(a applier) error { - switch w := a.(type) { - case nil: - s := fmt.Sprintf("ChecksumOption(%v)", flag) - return lz4errors.Error(s) - case *Writer: - w.frame.Descriptor.Flags.ContentChecksumSet(flag) - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// SizeOption sets the size of the original uncompressed data (default=0). It is useful to know the size of the -// whole uncompressed data stream. -func SizeOption(size uint64) Option { - return func(a applier) error { - switch w := a.(type) { - case nil: - s := fmt.Sprintf("SizeOption(%d)", size) - return lz4errors.Error(s) - case *Writer: - w.frame.Descriptor.Flags.SizeSet(size > 0) - w.frame.Descriptor.ContentSize = size - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// ConcurrencyOption sets the number of go routines used for compression. -// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used. -func ConcurrencyOption(n int) Option { - if n <= 0 { - n = runtime.GOMAXPROCS(0) - } - return func(a applier) error { - switch rw := a.(type) { - case nil: - s := fmt.Sprintf("ConcurrencyOption(%d)", n) - return lz4errors.Error(s) - case *Writer: - rw.num = n - return nil - case *Reader: - rw.num = n - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// CompressionLevel defines the level of compression to use. The higher the better, but slower, compression. -type CompressionLevel uint32 - -const ( - Fast CompressionLevel = 0 - Level1 CompressionLevel = 1 << (8 + iota) - Level2 - Level3 - Level4 - Level5 - Level6 - Level7 - Level8 - Level9 -) - -// CompressionLevelOption defines the compression level (default=Fast). -func CompressionLevelOption(level CompressionLevel) Option { - return func(a applier) error { - switch w := a.(type) { - case nil: - s := fmt.Sprintf("CompressionLevelOption(%s)", level) - return lz4errors.Error(s) - case *Writer: - switch level { - case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: - default: - return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) - } - w.level = lz4block.CompressionLevel(level) - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -func onBlockDone(int) {} - -// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when is has been compressed, -// for a Reader, it is when it has been uncompressed. -func OnBlockDoneOption(handler func(size int)) Option { - if handler == nil { - handler = onBlockDone - } - return func(a applier) error { - switch rw := a.(type) { - case nil: - s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String()) - return lz4errors.Error(s) - case *Writer: - rw.handler = handler - return nil - case *Reader: - rw.handler = handler - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} - -// LegacyOption provides support for writing LZ4 frames in the legacy format. -// -// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame. -// -// NB. 
compressed Linux kernel images use a tweaked LZ4 legacy format where -// the compressed stream is followed by the original (uncompressed) size of -// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf). -// This is also supported as a special case. -func LegacyOption(legacy bool) Option { - return func(a applier) error { - switch rw := a.(type) { - case nil: - s := fmt.Sprintf("LegacyOption(%v)", legacy) - return lz4errors.Error(s) - case *Writer: - rw.legacy = legacy - return nil - } - return lz4errors.ErrOptionNotApplicable - } -} diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go deleted file mode 100644 index 2de814909..000000000 --- a/vendor/github.com/pierrec/lz4/v4/options_gen.go +++ /dev/null @@ -1,92 +0,0 @@ -// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT. - -package lz4 - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Block64Kb-65536] - _ = x[Block256Kb-262144] - _ = x[Block1Mb-1048576] - _ = x[Block4Mb-4194304] -} - -const ( - _BlockSize_name_0 = "Block64Kb" - _BlockSize_name_1 = "Block256Kb" - _BlockSize_name_2 = "Block1Mb" - _BlockSize_name_3 = "Block4Mb" -) - -func (i BlockSize) String() string { - switch { - case i == 65536: - return _BlockSize_name_0 - case i == 262144: - return _BlockSize_name_1 - case i == 1048576: - return _BlockSize_name_2 - case i == 4194304: - return _BlockSize_name_3 - default: - return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")" - } -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
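All of the option constructors above share one shape: invoked with a nil applier they return their own string form (which is how Option.String works), and invoked with a *Writer or *Reader they either mutate it or report ErrOptionNotApplicable. A minimal usage sketch:

package main

import (
	"bytes"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)
	// Apply must run before the first Write; afterwards the writer has left
	// newState and Apply returns ErrOptionClosedOrError.
	if err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.ChecksumOption(true),
		lz4.CompressionLevelOption(lz4.Level5),
		lz4.ConcurrencyOption(-1), // n <= 0 means runtime.GOMAXPROCS(0)
	); err != nil {
		panic(err)
	}
	if _, err := zw.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
}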
- var x [1]struct{} - _ = x[Fast-0] - _ = x[Level1-512] - _ = x[Level2-1024] - _ = x[Level3-2048] - _ = x[Level4-4096] - _ = x[Level5-8192] - _ = x[Level6-16384] - _ = x[Level7-32768] - _ = x[Level8-65536] - _ = x[Level9-131072] -} - -const ( - _CompressionLevel_name_0 = "Fast" - _CompressionLevel_name_1 = "Level1" - _CompressionLevel_name_2 = "Level2" - _CompressionLevel_name_3 = "Level3" - _CompressionLevel_name_4 = "Level4" - _CompressionLevel_name_5 = "Level5" - _CompressionLevel_name_6 = "Level6" - _CompressionLevel_name_7 = "Level7" - _CompressionLevel_name_8 = "Level8" - _CompressionLevel_name_9 = "Level9" -) - -func (i CompressionLevel) String() string { - switch { - case i == 0: - return _CompressionLevel_name_0 - case i == 512: - return _CompressionLevel_name_1 - case i == 1024: - return _CompressionLevel_name_2 - case i == 2048: - return _CompressionLevel_name_3 - case i == 4096: - return _CompressionLevel_name_4 - case i == 8192: - return _CompressionLevel_name_5 - case i == 16384: - return _CompressionLevel_name_6 - case i == 32768: - return _CompressionLevel_name_7 - case i == 65536: - return _CompressionLevel_name_8 - case i == 131072: - return _CompressionLevel_name_9 - default: - return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")" - } -} diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go deleted file mode 100644 index 275daad7c..000000000 --- a/vendor/github.com/pierrec/lz4/v4/reader.go +++ /dev/null @@ -1,275 +0,0 @@ -package lz4 - -import ( - "bytes" - "io" - - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" - "github.com/pierrec/lz4/v4/internal/lz4stream" -) - -var readerStates = []aState{ - noState: newState, - errorState: newState, - newState: readState, - readState: closedState, - closedState: newState, -} - -// NewReader returns a new LZ4 frame decoder. -func NewReader(r io.Reader) *Reader { - return newReader(r, false) -} - -func newReader(r io.Reader, legacy bool) *Reader { - zr := &Reader{frame: lz4stream.NewFrame()} - zr.state.init(readerStates) - _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone) - zr.Reset(r) - return zr -} - -// Reader allows reading an LZ4 stream. -type Reader struct { - state _State - src io.Reader // source reader - num int // concurrency level - frame *lz4stream.Frame // frame being read - data []byte // block buffer allocated in non concurrent mode - reads chan []byte // pending data - idx int // size of pending data - handler func(int) - cum uint32 - dict []byte -} - -func (*Reader) private() {} - -func (r *Reader) Apply(options ...Option) (err error) { - defer r.state.check(&err) - switch r.state.state { - case newState: - case errorState: - return r.state.err - default: - return lz4errors.ErrOptionClosedOrError - } - for _, o := range options { - if err = o(r); err != nil { - return - } - } - return -} - -// Size returns the size of the underlying uncompressed data, if set in the stream. -func (r *Reader) Size() int { - switch r.state.state { - case readState, closedState: - if r.frame.Descriptor.Flags.Size() { - return int(r.frame.Descriptor.ContentSize) - } - } - return 0 -} - -func (r *Reader) isNotConcurrent() bool { - return r.num == 1 -} - -func (r *Reader) init() error { - err := r.frame.ParseHeaders(r.src) - if err != nil { - return err - } - if !r.frame.Descriptor.Flags.BlockIndependence() { - // We can't decompress dependent blocks concurrently. 
-		// Instead of throwing an error to the user, silently drop concurrency.
-		r.num = 1
-	}
-	data, err := r.frame.InitR(r.src, r.num)
-	if err != nil {
-		return err
-	}
-	r.reads = data
-	r.idx = 0
-	size := r.frame.Descriptor.Flags.BlockSizeIndex()
-	r.data = size.Get()
-	r.cum = 0
-	return nil
-}
-
-func (r *Reader) Read(buf []byte) (n int, err error) {
-	defer r.state.check(&err)
-	switch r.state.state {
-	case readState:
-	case closedState, errorState:
-		return 0, r.state.err
-	case newState:
-		// First initialization.
-		if err = r.init(); r.state.next(err) {
-			return
-		}
-	default:
-		return 0, r.state.fail()
-	}
-	for len(buf) > 0 {
-		var bn int
-		if r.idx == 0 {
-			if r.isNotConcurrent() {
-				bn, err = r.read(buf)
-			} else {
-				lz4block.Put(r.data)
-				r.data = <-r.reads
-				if len(r.data) == 0 {
-					// No uncompressed data: something went wrong or we are done.
-					err = r.frame.Blocks.ErrorR()
-				}
-			}
-			switch err {
-			case nil:
-			case io.EOF:
-				if er := r.frame.CloseR(r.src); er != nil {
-					err = er
-				}
-				lz4block.Put(r.data)
-				r.data = nil
-				return
-			default:
-				return
-			}
-		}
-		if bn == 0 {
-			// Fill buf with buffered data.
-			bn = copy(buf, r.data[r.idx:])
-			r.idx += bn
-			if r.idx == len(r.data) {
-				// All data read, get ready for the next Read.
-				r.idx = 0
-			}
-		}
-		buf = buf[bn:]
-		n += bn
-		r.handler(bn)
-	}
-	return
-}
-
-// read uncompresses the next block as follows:
-// - if buf has enough room, the block is uncompressed into it directly
-//   and the length of used space is returned
-// - else, the uncompressed data is stored in r.data and 0 is returned
-func (r *Reader) read(buf []byte) (int, error) {
-	block := r.frame.Blocks.Block
-	_, err := block.Read(r.frame, r.src, r.cum)
-	if err != nil {
-		return 0, err
-	}
-	var direct bool
-	dst := r.data[:cap(r.data)]
-	if len(buf) >= len(dst) {
-		// Uncompress directly into buf.
-		direct = true
-		dst = buf
-	}
-	dst, err = block.Uncompress(r.frame, dst, r.dict, true)
-	if err != nil {
-		return 0, err
-	}
-	if !r.frame.Descriptor.Flags.BlockIndependence() {
-		if len(r.dict)+len(dst) > 128*1024 {
-			preserveSize := 64*1024 - len(dst)
-			if preserveSize < 0 {
-				preserveSize = 0
-			}
-			r.dict = r.dict[len(r.dict)-preserveSize:]
-		}
-		r.dict = append(r.dict, dst...)
-	}
-	r.cum += uint32(len(dst))
-	if direct {
-		return len(dst), nil
-	}
-	r.data = dst
-	return 0, nil
-}
-
-// Reset clears the state of the Reader r such that it is equivalent to its
-// initial state from NewReader, but instead reading from reader.
-// No access to reader is performed.
-func (r *Reader) Reset(reader io.Reader) {
-	if r.data != nil {
-		lz4block.Put(r.data)
-		r.data = nil
-	}
-	r.frame.Reset(r.num)
-	r.state.reset()
-	r.src = reader
-	r.reads = nil
-}
-
-// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
-func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
-	switch r.state.state {
-	case closedState, errorState:
-		return 0, r.state.err
-	case newState:
-		if err = r.init(); r.state.next(err) {
-			return
-		}
-	default:
-		return 0, r.state.fail()
-	}
-	defer r.state.nextd(&err)
-
-	var data []byte
-	if r.isNotConcurrent() {
-		size := r.frame.Descriptor.Flags.BlockSizeIndex()
-		data = size.Get()
-		defer lz4block.Put(data)
-	}
-	for {
-		var bn int
-		var dst []byte
-		if r.isNotConcurrent() {
-			bn, err = r.read(data)
-			dst = data[:bn]
-		} else {
-			lz4block.Put(dst)
-			dst = <-r.reads
-			bn = len(dst)
-			if bn == 0 {
-				// No uncompressed data: something went wrong or we are done.
- err = r.frame.Blocks.ErrorR() - } - } - switch err { - case nil: - case io.EOF: - err = r.frame.CloseR(r.src) - return - default: - return - } - r.handler(bn) - bn, err = w.Write(dst) - n += int64(bn) - if err != nil { - return - } - } -} - -// ValidFrameHeader returns a bool indicating if the given bytes slice matches a LZ4 header. -func ValidFrameHeader(in []byte) (bool, error) { - f := lz4stream.NewFrame() - err := f.ParseHeaders(bytes.NewReader(in)) - if err == nil { - return true, nil - } - if err == lz4errors.ErrInvalidFrame { - return false, nil - } - return false, err -} diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go deleted file mode 100644 index d94f04d05..000000000 --- a/vendor/github.com/pierrec/lz4/v4/state.go +++ /dev/null @@ -1,75 +0,0 @@ -package lz4 - -import ( - "errors" - "fmt" - "io" - - "github.com/pierrec/lz4/v4/internal/lz4errors" -) - -//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go - -const ( - noState aState = iota // uninitialized reader - errorState // unrecoverable error encountered - newState // instantiated object - readState // reading data - writeState // writing data - closedState // all done -) - -type ( - aState uint8 - _State struct { - states []aState - state aState - err error - } -) - -func (s *_State) init(states []aState) { - s.states = states - s.state = states[0] -} - -func (s *_State) reset() { - s.state = s.states[0] - s.err = nil -} - -// next sets the state to the next one unless it is passed a non nil error. -// It returns whether or not it is in error. -func (s *_State) next(err error) bool { - if err != nil { - s.err = fmt.Errorf("%s: %w", s.state, err) - s.state = errorState - return true - } - s.state = s.states[s.state] - return false -} - -// nextd is like next but for defers. -func (s *_State) nextd(errp *error) bool { - return errp != nil && s.next(*errp) -} - -// check sets s in error if not already in error and if the error is not nil or io.EOF, -func (s *_State) check(errp *error) { - if s.state == errorState || errp == nil { - return - } - if err := *errp; err != nil { - s.err = fmt.Errorf("%w[%s]", err, s.state) - if !errors.Is(err, io.EOF) { - s.state = errorState - } - } -} - -func (s *_State) fail() error { - s.state = errorState - s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state) - return s.err -} diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go deleted file mode 100644 index 75fb82892..000000000 --- a/vendor/github.com/pierrec/lz4/v4/state_gen.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT. - -package lz4 - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
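Tying the deleted reader together: NewReader wraps any io.Reader that yields LZ4 frames, Read parses the headers lazily on first use, and io.Copy takes the WriteTo fast path shown above. A minimal round-trip sketch:

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	// Produce a frame to read back.
	var frame bytes.Buffer
	zw := lz4.NewWriter(&frame)
	if _, err := zw.Write([]byte("round trip through an LZ4 frame\n")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress it; io.Copy dispatches to Reader.WriteTo under the hood.
	zr := lz4.NewReader(&frame)
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}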
- var x [1]struct{} - _ = x[noState-0] - _ = x[errorState-1] - _ = x[newState-2] - _ = x[readState-3] - _ = x[writeState-4] - _ = x[closedState-5] -} - -const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState" - -var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55} - -func (i aState) String() string { - if i >= aState(len(_aState_index)-1) { - return "aState(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _aState_name[_aState_index[i]:_aState_index[i+1]] -} diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go deleted file mode 100644 index 77699f2b5..000000000 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ /dev/null @@ -1,238 +0,0 @@ -package lz4 - -import ( - "io" - - "github.com/pierrec/lz4/v4/internal/lz4block" - "github.com/pierrec/lz4/v4/internal/lz4errors" - "github.com/pierrec/lz4/v4/internal/lz4stream" -) - -var writerStates = []aState{ - noState: newState, - newState: writeState, - writeState: closedState, - closedState: newState, - errorState: newState, -} - -// NewWriter returns a new LZ4 frame encoder. -func NewWriter(w io.Writer) *Writer { - zw := &Writer{frame: lz4stream.NewFrame()} - zw.state.init(writerStates) - _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone) - zw.Reset(w) - return zw -} - -// Writer allows writing an LZ4 stream. -type Writer struct { - state _State - src io.Writer // destination writer - level lz4block.CompressionLevel // how hard to try - num int // concurrency level - frame *lz4stream.Frame // frame being built - data []byte // pending data - idx int // size of pending data - handler func(int) - legacy bool -} - -func (*Writer) private() {} - -func (w *Writer) Apply(options ...Option) (err error) { - defer w.state.check(&err) - switch w.state.state { - case newState: - case errorState: - return w.state.err - default: - return lz4errors.ErrOptionClosedOrError - } - w.Reset(w.src) - for _, o := range options { - if err = o(w); err != nil { - return - } - } - return -} - -func (w *Writer) isNotConcurrent() bool { - return w.num == 1 -} - -// init sets up the Writer when in newState. It does not change the Writer state. -func (w *Writer) init() error { - w.frame.InitW(w.src, w.num, w.legacy) - size := w.frame.Descriptor.Flags.BlockSizeIndex() - w.data = size.Get() - w.idx = 0 - return w.frame.Descriptor.Write(w.frame, w.src) -} - -func (w *Writer) Write(buf []byte) (n int, err error) { - defer w.state.check(&err) - switch w.state.state { - case writeState: - case closedState, errorState: - return 0, w.state.err - case newState: - if err = w.init(); w.state.next(err) { - return - } - default: - return 0, w.state.fail() - } - - zn := len(w.data) - for len(buf) > 0 { - if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err = w.write(buf[:zn], false); err != nil { - return - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(w.data[w.idx:], buf) - n += m - w.idx += m - buf = buf[m:] - - if w.idx < len(w.data) { - // Buffer not filled. - return - } - - // Buffer full. 
- if err = w.write(w.data, true); err != nil { - return - } - if !w.isNotConcurrent() { - size := w.frame.Descriptor.Flags.BlockSizeIndex() - w.data = size.Get() - } - w.idx = 0 - } - return -} - -func (w *Writer) write(data []byte, safe bool) error { - if w.isNotConcurrent() { - block := w.frame.Blocks.Block - err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src) - w.handler(len(block.Data)) - return err - } - c := make(chan *lz4stream.FrameDataBlock) - w.frame.Blocks.Blocks <- c - go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) { - b := lz4stream.NewFrameDataBlock(w.frame) - c <- b.Compress(w.frame, data, w.level) - <-c - w.handler(len(b.Data)) - b.Close(w.frame) - if safe { - // safe to put it back as the last usage of it was FrameDataBlock.Write() called before c is closed - lz4block.Put(data) - } - }(c, data, safe) - - return nil -} - -// Flush any buffered data to the underlying writer immediately. -func (w *Writer) Flush() (err error) { - switch w.state.state { - case writeState: - case errorState: - return w.state.err - default: - return nil - } - - if w.idx > 0 { - // Flush pending data, disable w.data freeing as it is done later on. - if err = w.write(w.data[:w.idx], false); err != nil { - return err - } - w.idx = 0 - } - return nil -} - -// Close closes the Writer, flushing any unwritten data to the underlying writer -// without closing it. -func (w *Writer) Close() error { - if err := w.Flush(); err != nil { - return err - } - err := w.frame.CloseW(w.src, w.num) - // It is now safe to free the buffer. - if w.data != nil { - lz4block.Put(w.data) - w.data = nil - } - return err -} - -// Reset clears the state of the Writer w such that it is equivalent to its -// initial state from NewWriter, but instead writing to writer. -// Reset keeps the previous options unless overwritten by the supplied ones. -// No access to writer is performed. -// -// w.Close must be called before Reset or pending data may be dropped. -func (w *Writer) Reset(writer io.Writer) { - w.frame.Reset(w.num) - w.state.reset() - w.src = writer -} - -// ReadFrom efficiently reads from r and compressed into the Writer destination. -func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) { - switch w.state.state { - case closedState, errorState: - return 0, w.state.err - case newState: - if err = w.init(); w.state.next(err) { - return - } - default: - return 0, w.state.fail() - } - defer w.state.check(&err) - - size := w.frame.Descriptor.Flags.BlockSizeIndex() - var done bool - var rn int - data := size.Get() - if w.isNotConcurrent() { - // Keep the same buffer for the whole process. - defer lz4block.Put(data) - } - for !done { - rn, err = io.ReadFull(r, data) - switch err { - case nil: - case io.EOF, io.ErrUnexpectedEOF: // read may be partial - done = true - default: - return - } - n += int64(rn) - err = w.write(data[:rn], true) - if err != nil { - return - } - w.handler(rn) - if !done && !w.isNotConcurrent() { - // The buffer will be returned automatically by go routines (safe=true) - // so get a new one fo the next round. - data = size.Get() - } - } - return -} diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go deleted file mode 100644 index 163645b86..000000000 --- a/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
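The deleted writer mirrors this on the compression side: io.Copy into the Writer takes the ReadFrom fast path, Flush only drains buffered data, and Close is what terminates the frame with the end mark and optional content checksum. A minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var out bytes.Buffer
	zw := lz4.NewWriter(&out)
	// io.Copy dispatches to Writer.ReadFrom, which compresses block by block.
	if _, err := io.Copy(zw, strings.NewReader(strings.Repeat("data ", 1<<12))); err != nil {
		panic(err)
	}
	// Close, not Flush, writes the end mark; skipping it leaves a truncated frame.
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", out.Len())
}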
- -// Package httpproxy provides support for HTTP proxy determination -// based on environment variables, as provided by net/http's -// ProxyFromEnvironment function. -// -// The API is not subject to the Go 1 compatibility promise and may change at -// any time. -package httpproxy - -import ( - "errors" - "fmt" - "net" - "net/url" - "os" - "strings" - "unicode/utf8" - - "golang.org/x/net/idna" -) - -// Config holds configuration for HTTP proxy settings. See -// FromEnvironment for details. -type Config struct { - // HTTPProxy represents the value of the HTTP_PROXY or - // http_proxy environment variable. It will be used as the proxy - // URL for HTTP requests and HTTPS requests unless overridden by - // HTTPSProxy or NoProxy. - HTTPProxy string - - // HTTPSProxy represents the HTTPS_PROXY or https_proxy - // environment variable. It will be used as the proxy URL for - // HTTPS requests unless overridden by NoProxy. - HTTPSProxy string - - // NoProxy represents the NO_PROXY or no_proxy environment - // variable. It specifies a string that contains comma-separated values - // specifying hosts that should be excluded from proxying. Each value is - // represented by an IP address prefix (1.2.3.4), an IP address prefix in - // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). - // An IP address prefix and domain name can also include a literal port - // number (1.2.3.4:80). - // A domain name matches that name and all subdomains. A domain name with - // a leading "." matches subdomains only. For example "foo.com" matches - // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". - // A single asterisk (*) indicates that no proxying should be done. - // A best effort is made to parse the string and errors are - // ignored. - NoProxy string - - // CGI holds whether the current process is running - // as a CGI handler (FromEnvironment infers this from the - // presence of a REQUEST_METHOD environment variable). - // When this is set, ProxyForURL will return an error - // when HTTPProxy applies, because a client could be - // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. - CGI bool -} - -// config holds the parsed configuration for HTTP proxy settings. -type config struct { - // Config represents the original configuration as defined above. - Config - - // httpsProxy is the parsed URL of the HTTPSProxy if defined. - httpsProxy *url.URL - - // httpProxy is the parsed URL of the HTTPProxy if defined. - httpProxy *url.URL - - // ipMatchers represent all values in the NoProxy that are IP address - // prefixes or an IP address in CIDR notation. - ipMatchers []matcher - - // domainMatchers represent all values in the NoProxy that are a domain - // name or hostname & domain name - domainMatchers []matcher -} - -// FromEnvironment returns a Config instance populated from the -// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the -// lowercase versions thereof). HTTPS_PROXY takes precedence over -// HTTP_PROXY for https requests. -// -// The environment values may be either a complete URL or a -// "host[:port]", in which case the "http" scheme is assumed. An error -// is returned if the value is a different form. 
-func FromEnvironment() *Config { - return &Config{ - HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), - HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), - NoProxy: getEnvAny("NO_PROXY", "no_proxy"), - CGI: os.Getenv("REQUEST_METHOD") != "", - } -} - -func getEnvAny(names ...string) string { - for _, n := range names { - if val := os.Getenv(n); val != "" { - return val - } - } - return "" -} - -// ProxyFunc returns a function that determines the proxy URL to use for -// a given request URL. Changing the contents of cfg will not affect -// proxy functions created earlier. -// -// A nil URL and nil error are returned if no proxy is defined in the -// environment, or a proxy should not be used for the given request, as -// defined by NO_PROXY. -// -// As a special case, if req.URL.Host is "localhost" (with or without a -// port number), then a nil URL and nil error will be returned. -func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { - // Preprocess the Config settings for more efficient evaluation. - cfg1 := &config{ - Config: *cfg, - } - cfg1.init() - return cfg1.proxyForURL -} - -func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { - var proxy *url.URL - if reqURL.Scheme == "https" { - proxy = cfg.httpsProxy - } - if proxy == nil { - proxy = cfg.httpProxy - if proxy != nil && cfg.CGI { - return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") - } - } - if proxy == nil { - return nil, nil - } - if !cfg.useProxy(canonicalAddr(reqURL)) { - return nil, nil - } - - return proxy, nil -} - -func parseProxy(proxy string) (*url.URL, error) { - if proxy == "" { - return nil, nil - } - - proxyURL, err := url.Parse(proxy) - if err != nil || - (proxyURL.Scheme != "http" && - proxyURL.Scheme != "https" && - proxyURL.Scheme != "socks5") { - // proxy was bogus. Try prepending "http://" to it and - // see if that parses correctly. If not, we fall - // through and complain about the original one. - if proxyURL, err := url.Parse("http://" + proxy); err == nil { - return proxyURL, nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) - } - return proxyURL, nil -} - -// useProxy reports whether requests to addr should use a proxy, -// according to the NO_PROXY or no_proxy environment variable. -// addr is always a canonicalAddr with a host and port. 
-func (cfg *config) useProxy(addr string) bool { - if len(addr) == 0 { - return true - } - host, port, err := net.SplitHostPort(addr) - if err != nil { - return false - } - if host == "localhost" { - return false - } - ip := net.ParseIP(host) - if ip != nil { - if ip.IsLoopback() { - return false - } - } - - addr = strings.ToLower(strings.TrimSpace(host)) - - if ip != nil { - for _, m := range cfg.ipMatchers { - if m.match(addr, port, ip) { - return false - } - } - } - for _, m := range cfg.domainMatchers { - if m.match(addr, port, ip) { - return false - } - } - return true -} - -func (c *config) init() { - if parsed, err := parseProxy(c.HTTPProxy); err == nil { - c.httpProxy = parsed - } - if parsed, err := parseProxy(c.HTTPSProxy); err == nil { - c.httpsProxy = parsed - } - - for _, p := range strings.Split(c.NoProxy, ",") { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - - if p == "*" { - c.ipMatchers = []matcher{allMatch{}} - c.domainMatchers = []matcher{allMatch{}} - return - } - - // IPv4/CIDR, IPv6/CIDR - if _, pnet, err := net.ParseCIDR(p); err == nil { - c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) - continue - } - - // IPv4:port, [IPv6]:port - phost, pport, err := net.SplitHostPort(p) - if err == nil { - if len(phost) == 0 { - // There is no host part, likely the entry is malformed; ignore. - continue - } - if phost[0] == '[' && phost[len(phost)-1] == ']' { - phost = phost[1 : len(phost)-1] - } - } else { - phost = p - } - // IPv4, IPv6 - if pip := net.ParseIP(phost); pip != nil { - c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) - continue - } - - if len(phost) == 0 { - // There is no host part, likely the entry is malformed; ignore. - continue - } - - // domain.com or domain.com:80 - // foo.com matches bar.foo.com - // .domain.com or .domain.com:port - // *.domain.com or *.domain.com:port - if strings.HasPrefix(phost, "*.") { - phost = phost[1:] - } - matchHost := false - if phost[0] != '.' { - matchHost = true - phost = "." + phost - } - c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) - } -} - -var portMap = map[string]string{ - "http": "80", - "https": "443", - "socks5": "1080", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -func canonicalAddr(url *url.URL) string { - addr := url.Hostname() - if v, err := idnaASCII(addr); err == nil { - addr = v - } - port := url.Port() - if port == "" { - port = portMap[url.Scheme] - } - return net.JoinHostPort(addr, port) -} - -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -func idnaASCII(v string) (string, error) { - // TODO: Consider removing this check after verifying performance is okay. - // Right now punycode verification, length checks, context checks, and the - // permissible character tests are all omitted. It also prevents the ToASCII - // call from salvaging an invalid IDN, when possible. As a result it may be - // possible to have two IDNs that appear identical to the user where the - // ASCII-only version causes an error downstream whereas the non-ASCII - // version does not. - // Note that for correct ASCII IDNs ToASCII will only do considerably more - // work, but it will not cause an allocation. 
- if isASCII(v) { - return v, nil - } - return idna.Lookup.ToASCII(v) -} - -func isASCII(s string) bool { - for i := 0; i < len(s); i++ { - if s[i] >= utf8.RuneSelf { - return false - } - } - return true -} - -// matcher represents the matching rule for a given value in the NO_PROXY list -type matcher interface { - // match returns true if the host and optional port or ip and optional port - // are allowed - match(host, port string, ip net.IP) bool -} - -// allMatch matches on all possible inputs -type allMatch struct{} - -func (a allMatch) match(host, port string, ip net.IP) bool { - return true -} - -type cidrMatch struct { - cidr *net.IPNet -} - -func (m cidrMatch) match(host, port string, ip net.IP) bool { - return m.cidr.Contains(ip) -} - -type ipMatch struct { - ip net.IP - port string -} - -func (m ipMatch) match(host, port string, ip net.IP) bool { - if m.ip.Equal(ip) { - return m.port == "" || m.port == port - } - return false -} - -type domainMatch struct { - host string - port string - - matchHost bool -} - -func (m domainMatch) match(host, port string, ip net.IP) bool { - if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { - return m.port == "" || m.port == port - } - return false -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 04ed063ce..76995775a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -68,7 +68,7 @@ github.com/gorilla/handlers github.com/honestbee/jobq # github.com/jmespath/go-jmespath v0.3.0 github.com/jmespath/go-jmespath -# github.com/longhorn/backupstore v0.0.0-20230125201126-0c2bd550ebc3 +# github.com/longhorn/backupstore v0.0.0-20220913112826-5f5c95274f2a github.com/longhorn/backupstore github.com/longhorn/backupstore/fsops github.com/longhorn/backupstore/http @@ -77,9 +77,9 @@ github.com/longhorn/backupstore/nfs github.com/longhorn/backupstore/s3 github.com/longhorn/backupstore/util github.com/longhorn/backupstore/vfs -# github.com/longhorn/go-iscsi-helper v0.0.0-20220927074943-051bf960608b +# github.com/longhorn/go-iscsi-helper v0.0.0-20230215045129-588aa7586e4c github.com/longhorn/go-iscsi-helper/util -# github.com/longhorn/longhorn-engine v1.3.2-0.20220929032851-7aac8ae9c8b4 +# github.com/longhorn/longhorn-engine v1.3.3-0.20230216042703-718990dc8a35 github.com/longhorn/longhorn-engine/pkg/backingfile github.com/longhorn/longhorn-engine/pkg/controller/client github.com/longhorn/longhorn-engine/pkg/meta @@ -96,12 +96,6 @@ github.com/longhorn/sparse-tools/sparse github.com/mschoch/smat # github.com/philhofer/fwd v1.0.0 github.com/philhofer/fwd -# github.com/pierrec/lz4/v4 v4.1.17 -github.com/pierrec/lz4/v4 -github.com/pierrec/lz4/v4/internal/lz4block -github.com/pierrec/lz4/v4/internal/lz4errors -github.com/pierrec/lz4/v4/internal/lz4stream -github.com/pierrec/lz4/v4/internal/xxh32 # github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/rancher/go-fibmap v0.0.0-20160418233256-5fc9f8c1ed47 @@ -121,7 +115,6 @@ github.com/willf/bitset # golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/net/context golang.org/x/net/http/httpguts -golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna
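Last, the deleted x/net httpproxy package: Config.ProxyFunc precomputes the ipMatchers and domainMatchers described above and then answers per request URL. A minimal sketch; the proxy address and hostnames are invented for illustration:

package main

import (
	"fmt"
	"net/url"

	"golang.org/x/net/http/httpproxy"
)

func main() {
	cfg := &httpproxy.Config{
		HTTPProxy:  "http://proxy.internal:3128", // hypothetical proxy
		HTTPSProxy: "http://proxy.internal:3128",
		// Per the matching rules above: "foo.com" also matches "bar.foo.com",
		// while a leading dot such as ".cluster.local" matches subdomains only.
		NoProxy: "localhost,10.0.0.0/8,.cluster.local",
	}
	proxyFunc := cfg.ProxyFunc()

	for _, raw := range []string{
		"https://registry.example.com/v2/",      // proxied via HTTPSProxy
		"http://svc.cluster.local:8080/healthz", // excluded by NO_PROXY
	} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		p, err := proxyFunc(u)
		fmt.Println(raw, "->", p, err)
	}
}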