From 877d2a5c90a40304167ba66f6699ba37f2f2c687 Mon Sep 17 00:00:00 2001 From: Andrew Orme Date: Fri, 14 Jun 2024 00:26:07 -0400 Subject: [PATCH] CONF rebuild (v3) (#118) * Replace CONF package Signed-off-by: orme292 * New sample yaml files Signed-off-by: orme292 * Rename consts for sec check Signed-off-by: orme292 * Len return cannot be less than 0 Signed-off-by: orme292 * Combine statements Signed-off-by: orme292 * Linode: fatal exit if the bucket cannot be created. Signed-off-by: orme292 * Update READMEs Signed-off-by: orme292 * Update CHANGELOG Signed-off-by: orme292 --------- Signed-off-by: orme292 --- CHANGELOG.md | 41 ++-- README.md | 143 ++++++++------ VERSION | 2 +- conf/appconfig.go | 98 ---------- conf/builder.go | 81 ++++++++ conf/create.go | 128 ------------ conf/globals.go | 122 ++++++++++++ conf/helpers.go | 77 +++++--- conf/provider_akamai.go | 79 -------- conf/provider_aws.go | 107 ++++++---- conf/provider_linode.go | 69 +++++++ conf/provider_oci.go | 68 +++++-- conf/readconfig.go | 281 --------------------------- conf/type_appconfig.go | 123 ++++++++++++ conf/type_bucket.go | 34 ++++ conf/type_logopts.go | 36 ++++ conf/type_objects.go | 64 ++++++ conf/type_opts.go | 62 ++++++ conf/type_profile.go | 146 ++++++++++++++ conf/type_provider.go | 79 ++++++++ conf/type_tagopts.go | 33 ++++ conf/type_tags.go | 33 ++++ conf/types.go | 334 -------------------------------- conf/validate.go | 76 -------- docs/README_AKAMAI.md | 130 +++++++------ docs/README_OCI.md | 135 +++++++------ logbot/types.go | 2 +- main.go | 31 +-- profiles/example1.yaml | 37 ---- profiles/example2.yaml | 37 ---- profiles/example3.yaml | 30 --- profiles/example_aws.yaml | 39 ++++ profiles/example_linode.yaml | 39 ++++ profiles/example_oci.yaml | 39 ++++ s3packs/main.go | 10 +- s3packs/objectify/file_obj.go | 4 +- s3packs/objectify/helpers.go | 6 +- s3packs/pack_akamai/helpers.go | 11 +- s3packs/pack_akamai/iterator.go | 2 +- s3packs/pack_akamai/operator.go | 2 +- s3packs/provider/processor.go | 4 +- s3packs/types.go | 2 +- 42 files changed, 1473 insertions(+), 1403 deletions(-) delete mode 100644 conf/appconfig.go create mode 100644 conf/builder.go delete mode 100644 conf/create.go create mode 100644 conf/globals.go delete mode 100644 conf/provider_akamai.go create mode 100644 conf/provider_linode.go delete mode 100644 conf/readconfig.go create mode 100644 conf/type_appconfig.go create mode 100644 conf/type_bucket.go create mode 100644 conf/type_logopts.go create mode 100644 conf/type_objects.go create mode 100644 conf/type_opts.go create mode 100644 conf/type_profile.go create mode 100644 conf/type_provider.go create mode 100644 conf/type_tagopts.go create mode 100644 conf/type_tags.go delete mode 100644 conf/types.go delete mode 100644 conf/validate.go delete mode 100644 profiles/example1.yaml delete mode 100644 profiles/example2.yaml delete mode 100644 profiles/example3.yaml create mode 100644 profiles/example_aws.yaml create mode 100644 profiles/example_linode.yaml create mode 100644 profiles/example_oci.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index a4b8799..d5843b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,23 @@ This is the Changelog. Between each version, major or minor, I'll document all c bug fix, feature addition, or minor tweak. --- -### **1.3.4** (2023-02-13) + +### **1.4.0** (2024-06-14) + +- conf: package rebuilt to be modular and readable. +- conf: Akamai renamed to Linode because Linode is better. 
+- conf: Directories renamed 'Dirs'
+- main: Update --help text
+- main: support new conf package
+- profiles: update for new conf package
+- READMEs: updated with a slightly new format
+- s3packs/objectify: support new conf package
+- s3packs/pack_akamai: fatal error if bucket cannot be created.
+- CHANGELOG: CHANGES LOGGED
+
+---
+
+### **1.3.4** (2024-02-13)
 - conf: Added support for the Akamai provider
 - conf: Renamed provider-specific files like: provider_aws.go
 - conf: Better whitespace trimming from profile fields.
@@ -22,7 +38,8 @@ bug fix, feature addition, or minor tweak.
 - CHANGELOG: CHANGES LOGGED
 
 ---
-### **1.3.3a** (2023-02-12)
+
+### **1.3.3a** (2024-02-12)
 - Use Go 1.22.0
 - Update Github Actions to use Go 1.22.0
 - Update Dependencies:
@@ -30,7 +47,7 @@ bug fix, feature addition, or minor tweak.
 	- aws-sdk-go-v2/service/s3 v1.48.0 -> v1.48.1
 	- rs/zerolog v1.31.0 -> v1.32.0
 
-### **1.3.3** (2023-02-12)
+### **1.3.3** (2024-02-12)
 - conf: Added support for the OCI provider
 - conf: Fixed a bug where ChecksumSHA256 was never read from the profile
 - s3packs/pack_oci: full support for OCI Object Storage (Oracle Cloud)
@@ -41,14 +58,14 @@ bug fix, feature addition, or minor tweak.
 - README: updated with OCI information
 - README_OCI: added
 
-### **1.3.2** (2023-01-12)
+### **1.3.2** (2024-01-12)
 - s3packs/objectify: removed DirObjList and DirObj. RootList is now a slice of FileObjLists.
 
-### **1.3.1** (2023-01-10)
+### **1.3.1** (2024-01-10)
 - replaced old example profiles with a new one that's up to date
 - s3packs/objectify: comment update
 
-### **1.3.0** (2023-01-07)
+### **1.3.0** (2024-01-07)
 - s3pack: Removed s3pack
 - s3packs: Added s3packs, which has modular support for multiple providers.
 - s3packs/objectify: added objectify, that has an object-models for directory trees
@@ -63,7 +80,7 @@ bug fix, feature addition, or minor tweak.
 - s3packs/pack_aws: added support for multipart parallel uploads with integrity checks.
 - s3packs/pack_aws: lets aws automatically calculate checksums, except for multipart uploads.
 
 ### **1.2.0** (2023-12-29)
 - config: Remove config module
 - conf: Add conf module with new AppConfig model
 - conf: Profiles are not versioned, only version 2 will be supported
@@ -73,20 +90,20 @@ bug fix, feature addition, or minor tweak.
 - s3pack: started using the new conf.AppConfig model, removed old config.Configuration model. Much cleaner.
 - README updated to reflect new config format and `--create` feature
 
 ### **1.1.0** (2023-12-21)
 - Upgrade to AWS SDK for Go V2
 - Move to Go 1.21.5
 - s3pack: Checksum matching on successful upload
 - s3pack: Dropped multipart upload support (for now) in favor of checksum matching
 - s3pack: AWS SDK for Go V2 dropped the iterator model, so I wrote my own iterator implementation.
 
 ### **1.0.3** (2023-12-17)
 - s3pack: concurrency for checksum calculations, more speed
 - s3pack: concurrency for checking for dupe objects, more speed
 - s3pack: counting uploads and ignored files is done on the fly
 - s3pack: display total uploaded bytes
 
 ### **1.0.2** (2023-12-13)
 - config: add new options 'maxConcurrentUploads'
 - s3pack: add upload concurrency (handled at ObjectList level)
 - s3pack: config references changed to 'c'
@@ -94,13 +111,13 @@ bug fix, feature addition, or minor tweak.
 - s3pack: FileObject has new individual Upload option, but it's unused.
 - s3pack: BucketExists checks are done once before processing any files/dirs (See main.go)
 
 ### **1.0.1** (2023-12-04)
 - use gocritic suggestions
 - resolve gosec scan issues
 - fix ineffectual assignment
 - correct version number
 
 ### **1.0.0** (2023-12-03)
 - config: More config profile validation occurs.
 - config: Added 'level' option to control the logging level (0 debug, 5 Panic)
 - config: console and file logging disabled by default
diff --git a/README.md b/README.md
index 5b4d97e..6295643 100644
--- a/README.md
+++ b/README.md
@@ -18,13 +18,15 @@ Special thanks to JetBrains!
--- ## About -**s3packer is aiming to be a highly configurable profile-based S3 storage upload and backup tool. Instead of crafting -and managing long complex commands, you can create YAML based profiles that will tell s3packer what to upload, -where to upload, how to name, and how to tag the files.** -**If you're going for redundancy, you can use profiles to upload to multiple S3 providers. s3packer currently supports -several services, like AWS, OCI (Oracle Cloud), and Akamai (Linode). I'm also fleshing out a plug-in system that makes -is easier to build out your own provider packs to support unsupported services.** +**s3packer is a configurable yaml-based S3 storage upload and backup tool. Instead of figuring out and managing complex +commands, you can create a YAML config that tells s3packer what to upload, where to upload it, how to name, and how to +tag the files.** + +**s3packer makes redundancy a breeze. Just use profiles to upload to multiple S3 providers. s3packer supports several +services: AWS, OCI (Oracle Cloud), and Linode (Akamai).** + +**Build support for other major projects by using the interfaces in the Provider package (s3packs/provider/).** --- @@ -35,7 +37,7 @@ See the [releases][releases_url] page... --- ## Providers -**s3packer** supports AWS S3, Oracle Cloud Object Storage, and Akamai (Linode) Object Storage. This readme will +**s3packer** supports AWS S3, Oracle Cloud Object Storage, and Linode (Akamai) Object Storage. This readme will go over using AWS as a provider, but there are additional docs available for other providers. - OCI: [README_OCI.md][s3packer_oci_readme_url] @@ -44,7 +46,7 @@ go over using AWS as a provider, but there are additional docs available for oth You can see sample profiles here: - [example1.yaml][example1_url] (AWS) - [example2.yaml][example2_url] (OCI) -- [example3.yaml][example3_url] (Akamai/Linode) +- [example3.yaml][example3_url] (Linode/Akamai) --- ## How to Use @@ -55,6 +57,8 @@ To start a session with an existing profile, just type in the following command: $ s3packer --profile="myprofile.yaml" ``` +--- + ## Creating a new Profile s3packer can create a base profile to help get you started. To create one, use the `--create` flag: @@ -63,28 +67,31 @@ s3packer can create a base profile to help get you started. To create one, use t $ s3packer --create="my-new-profile.yaml" ``` +--- + ## Setting up a Profile -s3packer profiles are written in the YAML format. To set it up, you just need to fill out a few fields, and you’ll be good to go! +s3packer profiles are written in YAML. To set one up, you just need to fill out a few fields, and you’ll be good to go! First, make sure you specify that you're using Version 4 of the profile format: ```yaml -Version: 4 +Version: 5 ``` Be sure to specify a provider: ```yaml -Provider: aws +Provider: + Use: aws ``` Use your AWS Key/Secret pair: ```yaml -Version: 4 -Provider: aws -AWS: +Version: 5 +Provider: + Use: aws Key: "my-key" Secret: "my-secret" ``` @@ -92,34 +99,33 @@ AWS: Or you can specify a profile that's already set up in your `~/.aws/credentials` file: ```yaml -Version: 4 -Provider: aws -AWS: - Profile: "my-profile" +Version: 5 +Provider: + Use: aws + Profile: "myAwsCliProfile"" ``` Configure your bucket: ```yaml Bucket: + Create: true Name: "deep-freeze" Region: "eu-north-1" ``` -And then, tell s3packer what you want to upload. You can specify folders, directories or individual files. (You can call -it the Folders section or the Directories section, it doesn't matter.) 
+And then, tell s3packer what you want to upload. You can specify directories or individual files. When you specify a
+directory, s3packer will traverse all subdirectories.
 
 ```yaml
-Uploads:
-  Folders:
-    - "/Users/forrest/docs/stocks/apple"
-    - "/Users/jenny/docs/song_lyrics"
-  Files:
-    - "/Users/forrest/docs/job-application-lawn-mower.pdf"
-    - "/Users/forrest/docs/dr-pepper-recipe.txt"
-    - "/Users/jenny/letters/from-forrest.docx"
+Files:
+  - "/Users/forrest/docs/job-application-lawn-mower.pdf"
+  - "/Users/forrest/docs/dr-pepper-recipe.txt"
+  - "/Users/jenny/letters/from-forrest.docx"
+Dirs:
+  - "/Users/forrest/docs/stocks/apple"
+  - "/Users/jenny/docs/song_lyrics"
 ```
-
 ---
 ### Tags
 
 You can also add tags to your files. Just add a `Tags` section to your profile:
 
 ```yaml
-Tagging:
-  Tags:
-    Author: "Forrest Gump"
-    Year: 1994
+Tags:
+  Author: "Forrest Gump"
+  Year: 1994
 ```
 
 ---
-### Extra Options
+### AWS Specific Options
 
-You can also customize how your files are stored, accessed, tagged, and uploaded using these options.
+Configure your object ACLs and the storage type.
 
----
 ```yaml
 AWS:
   ACL: "private"
   Storage: "ONEZONE_IA"
 ```
+
 **ACL**
The default is `private`, but you can use any canned ACL: - `public-read` @@ -167,39 +172,50 @@ The default is `STANDARD`, but you can use any of the following storage classes: --- +### Extra Options + +You can also customize how your files are stored, accessed, tagged, and uploaded using these options. + +--- + ```yaml Objects: + NamingType: "relative" NamePrefix: "monthly-" - RootPrefix: "/backups/monthly" - Naming: "relative" + PathPrefix: "/backups/monthly" ``` +**NamingType**
+The default is `relative`.
+
+- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files
+  specified in the profile always end up at the root of the bucket, after the `PathPrefix` and `NamePrefix` are applied).
+- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem.
+
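+For example, assuming a profile that lists the directory `/home/me/photos` (an illustrative path) and no prefixes, the
+file `/home/me/photos/cat.jpg` would be keyed roughly as follows:
+
+```yaml
+# NamingType: "relative"  ->  photos/cat.jpg
+# NamingType: "absolute"  ->  home/me/photos/cat.jpg
+```
+
 **NamePrefix**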
This is blank by default. Any value you put here will be added before the filename when it's uploaded to S3. Using something like `weekly-` will add that string to any file you're uploading, like `weekly-log.log` or `weekly-2021-01-01.log`. -**RootPrefix**
+**PathPrefix**
This is blank by default. Any value put here will be added before the file path when it's uploaded to S3. If you use
something like `/backups/monthly`, the file will be uploaded to `/backups/monthly/your-file.txt`.
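+A sketch of how the two prefixes combine, assuming the sample values above and an upload of `report.txt` (an
+illustrative file; the exact key also depends on `NamingType`):
+
+```yaml
+# NamePrefix: "monthly-"  +  PathPrefix: "/backups/monthly"
+# -> /backups/monthly/monthly-report.txt
+```
 
-**Naming**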
-The default is `relative`. -- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files specified in the profile will always end up at the root of the bucket, plus the `pathPrefix` and then `objectPrefix`). -- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem. - --- ```yaml Options: - MaxUploads: 100 - Overwrite: "never" + OverwriteObjects: "never" ``` +**MaxParts**
+The default depends on the provider. The AWS default is `100`. MaxParts specifies the number of pieces a large file will
+be broken up into before uploading and reassembling.
+
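+A minimal sketch of the whole options block (values are illustrative, not recommendations):
+
+```yaml
+Options:
+  MaxParts: 10
+  MaxUploads: 5
+  OverwriteObjects: "never"
+```
+
 **MaxUploads**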
The default is `5`. This is the maximum number of files that will be uploaded at the same time. Concurrency is at the directory level, so the biggest speed gains are seen when uploading a directory with many files. -**Overwrite**
+**OverwriteObjects**
This is `never` by default. If you set it to `always`, s3packer will overwrite any files in the bucket that have the same name as what you're uploading. Useful if you're uploading a file that is updated over and over again. @@ -207,15 +223,18 @@ have the same name as what you're uploading. Useful if you're uploading a file t ```yaml Tagging: + OriginPath: true ChecksumSHA256: false - Origins: true ``` -**ChecksumSHA256**
-This is `true` by default. Every object uploaded will be tagged with the file's calculated SHA256 checksum. -**Origins**
-This is `true` by default. Every object uploaded will be tagged with the full absolute path of the file on the -local filesystem. This is useful if you want to be able to trace the origin of a file in S3. +**OriginPath**
+This is `true` by default. Every object uploaded will be tagged with the full absolute path of the file on the local
+filesystem. This is useful if you want to be able to trace the origin of a file in S3. The tag name will be
+`s3packer-origin-path`.
+
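+As a sketch, an uploaded object's tags might then look something like this (values are illustrative):
+
+```yaml
+s3packer-origin-path: "/Users/forrest/docs/dr-pepper-recipe.txt"
+s3packer-checksum-sha256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+```
+
+**ChecksumSHA256**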
+This is `true` by default. Every object uploaded will be tagged with the file's calculated SHA256 checksum. The tag name +will be `s3packer-checksum-sha256`. --- @@ -226,25 +245,25 @@ And if you like keeping track of things or want a paper trail, you can set up lo ```yaml Logging: Level: 1 - Console: true - File: true - Filepath: "/var/log/backup.log" + OutputToConsole: true + OutputToFile: true + Path: "/var/log/backup.log" ``` **Level:**
This is `2` by default. The setting is by severity, with 0 being least severe and 5 being most severe. 0 will log all messages (including debug), and 5 will only log fatal messages which cause the program to exit. -**Console:**
+**OutputToConsole:**
This is `true` by default. Outputs logging messages to standard output. If you set it to `false`, s3packer prints minimal output. -**File:**
-This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to
-a file. You MUST also specify a `Filepath`.
+**OutputToFile:**
+This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to a file. You +MUST also specify a `Path`. -**Filepath:**
-File to write structured log messages to. If you set `File` to `true`, you must specify a filename.
+**Path:**
+Path of the file to write structured log messages to. If you set `OutputToFile` to `true`, you must specify a filename. The file will be created if it doesn't exist, and appended to if it does. --- diff --git a/VERSION b/VERSION index 2f93383..ec7b967 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.3.4 \ No newline at end of file +v1.4.0 \ No newline at end of file diff --git a/conf/appconfig.go b/conf/appconfig.go deleted file mode 100644 index b3652c6..0000000 --- a/conf/appconfig.go +++ /dev/null @@ -1,98 +0,0 @@ -package conf - -import ( - "github.com/orme292/s3packer/logbot" -) - -// NewAppConfig will build a new AppConfig object out of the specified yaml file. -// It creates a readConfig object and called the loadProfile() method to open, read, and unmarshal the yaml file. -// There is also a validateVersion() method that will eventually be fleshed out to make sure difference profile -// versions are unmarshalled correctly. -func NewAppConfig(file string) (ac *AppConfig, err error) { - rc := &readConfig{} - err = rc.loadProfile(file) - if err != nil { - return nil, err - } - - _, err = rc.validateVersion() - if err != nil { - return nil, err - } - - ac = &AppConfig{} - ac.setDefaults() - err = ac.transpose(rc) - return -} - -// setDefaults() sets the default values for the AppConfig object. -// -// It should be one of the first methods called after -// creating a new AppConfig object, but before the readConfig object values are transferred, or before apply() is -// called. -func (ac *AppConfig) setDefaults() { - ac.Bucket = &Bucket{Create: false} - ac.Objects = &Objects{Naming: NamingAbsolute} - ac.Opts = &Opts{ - MaxUploads: 5, - MaxParts: 1, - Overwrite: OverwriteNever, - } - ac.Tag = &TagOpts{ - ChecksumSHA256: true, - Origins: true, - } - ac.Log = &logbot.LogBot{ - Level: logbot.ERROR, - FlagConsole: true, - FlagFile: false, - } -} - -// apply will transfer the values from the readConfig object to the AppConfig object. It should be called after -// init() and after the readConfig object has been loaded and validated. -// Each struct of the AppConfig object is handled separately, and each should have its own readConfig method -// to handle validation of the values and transfer. 
-func (ac *AppConfig) transpose(r *readConfig) (err error) { - ac.LogOpts, err = r.transposeStructLogging() - ac.Log = &logbot.LogBot{ - Level: ac.LogOpts.Level, - FlagConsole: ac.LogOpts.Console, - FlagFile: ac.LogOpts.File, - Path: ac.LogOpts.Filepath, - } - if err != nil { - return - } - ac.Provider, err = r.transposeStructProvider() - if err != nil { - return - } - ac.Bucket, err = r.transposeStructBucket() - if err != nil { - return - } - ac.Objects, err = r.transposeStructObjects() - if err != nil { - return - } - ac.Opts, err = r.transposeStructOpts() - if err != nil { - return - } - ac.Tag, err = r.transposeStructTagOpts() - if err != nil { - return - } - ac.Tags, err = r.transposeStructTags() - if err != nil { - return - } - ac.Files, ac.Directories, err = r.transposeStructFileTargets() - if err != nil { - return - } - - return -} diff --git a/conf/builder.go b/conf/builder.go new file mode 100644 index 0000000..45a6816 --- /dev/null +++ b/conf/builder.go @@ -0,0 +1,81 @@ +package conf + +import ( + "fmt" + "os" + "path/filepath" + + "gopkg.in/yaml.v3" +) + +type Builder struct { + filename string + inc *ProfileIncoming + ac *AppConfig +} + +func NewBuilder(path string) *Builder { + + fpath, err := filepath.Abs(expandHome(path)) + if err != nil { + fpath = path + } + + return &Builder{ + filename: fpath, + inc: NewProfile(), + } + +} + +func (b *Builder) FromYaml() (*AppConfig, error) { + + err := b.inc.LoadFromYaml(b.filename) + if err != nil { + return b.ac, err + } + + b.ac = NewAppConfig() + err = b.ac.ImportFromProfile(b.inc) + if err != nil { + return b.ac, err + } + + return b.ac, nil + +} + +func (b *Builder) YamlOut() error { + + profile := NewProfile() + profile.loadSampleData() + + output, err := yaml.Marshal(&profile) + if err != nil { + return err + } + + _, err = canCreate(b.filename) + if err != nil { + return err + } + + f, err := os.Create(b.filename) + defer f.Close() + if err != nil { + return err + } + + n, err := f.WriteString("---\n") + if err != nil || n == 0 { + return fmt.Errorf("bad write: %v", err) + } + + n, err = f.Write(output) + if err != nil || n == 0 { + return fmt.Errorf("bad write: %v", err) + } + + return nil + +} diff --git a/conf/create.go b/conf/create.go deleted file mode 100644 index c2d6c7a..0000000 --- a/conf/create.go +++ /dev/null @@ -1,128 +0,0 @@ -package conf - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "gopkg.in/yaml.v3" -) - -// Create takes a filename as a string, and writes out a sample configuration profile. The file must not exist. -// The structure is built using a new struct, createProfile, that is based on readConfig -func Create(filename string) (err error) { - r := createProfile{} - r.Version = 4 - r.Provider = "aws|oci" - r.AWS.Profile = "default" - r.AWS.Key = "" - r.AWS.Secret = "" - r.AWS.ACL = AwsACLPrivate - r.AWS.Storage = AwsClassStandard - r.OCI.Profile = OciDefaultProfile - r.OCI.Compartment = "ocid1.compartment.oc1..abcdefghi0jklmn1op2qr3stuvwxyz..................." 
- r.OCI.Storage = OracleStorageTierStandard - r.Bucket.Name = "my-bucket" - r.Bucket.Region = "us-east-1" - r.Options.MaxUploads = 10 - r.Options.Overwrite = "never" - r.Tagging.ChecksumSHA256 = true - r.Tagging.Origins = true - r.Tagging.Tags = map[string]string{ - "hostname": "this host", - "author": "me", - } - r.Objects.NamePrefix = "" - r.Objects.RootPrefix = "" - r.Objects.Naming = "absolute" - r.Logging.Level = 2 - r.Logging.Console = true - r.Logging.File = false - r.Logging.Filepath = "" - r.Uploads.Files = []string{ - "file1.txt", - "file2.txt", - } - r.Uploads.Directories = []string{ - "/home/me/dir1", - "/home/me/dir2", - } - - o, err := yaml.Marshal(&r) - if err != nil { - return err - } - - filename, err = filepath.Abs(filepath.Clean(filename)) - if err != nil { - return err - } - - ok, err := canCreate(filename) - if !ok { - return fmt.Errorf("cannot create file %s: %s", filename, err.Error()) - } - - f, err := os.Create(filename) - if err != nil { - return err - } - defer func(f *os.File) { - err := f.Close() - if err != nil { - fmt.Printf("error closing file: %q\n", err.Error()) - os.Exit(1) - } - }(f) - - _, err = f.WriteString("---\n") - if err != nil { - return err - } - _, err = f.Write(o) - if err != nil { - return err - } - - fmt.Printf("--- Writing:\n%s\n\n", string(o)) - fmt.Printf("Wrote new profile to %q\n", filename) - return nil -} - -// canCreate checks whether a file can be created. It returns true if the file does not exist, and false if it does -// or if another error occurs. To figure it out if the program has permissions ot create the file, it attempts to -// create the file. If creation succeeds, then the file is immediately removed. -func canCreate(f string) (bool, error) { - filename, err := filepath.Abs(filepath.Clean(f)) - if err != nil { - return false, err - } - - // Resolve G304: Potential file inclusion via variable - if strings.Contains(filename, "..") { - return false, fmt.Errorf("invalid filename: %s", filename) - } - - _, err = os.Stat(filename) - if err != nil { - if os.IsNotExist(err) { - file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0o640) - if err != nil { - return false, err - } - err = file.Close() - if err != nil { - return false, err - } - err = os.Remove(filename) - if err != nil { - return false, err - } - return true, nil - } - return false, err - } - - return false, fmt.Errorf("file %s already exists", filename) -} diff --git a/conf/globals.go b/conf/globals.go new file mode 100644 index 0000000..1962938 --- /dev/null +++ b/conf/globals.go @@ -0,0 +1,122 @@ +package conf + +const ( + Empty = "" +) + +// AWS Constants +const ( + AwsACLPrivate = "private" + AwsACLPublicRead = "public-read" + AwsACLPublicReadWrite = "public-read-write" + AwsACLAuthenticatedRead = "authenticated-read" + AwsACLAwsExecRead = "aws-exec-read" + AwsACLBucketOwnerRead = "bucket-owner-read" + AwsACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + AwsClassStandard = "STANDARD" + AwsClassReducedRedundancy = "REDUCED_REDUNDANCY" + AwsClassGlacierIR = "GLACIER_IR" + AwsClassSnow = "SNOW" + AwsClassStandardIA = "STANDARD_IA" + AwsClassOneZoneIA = "ONEZONE_IA" + AwsClassIntelligentTiering = "INTELLIGENT_TIERING" + AwsClassGlacier = "GLACIER" + AwsClassDeepArchive = "DEEP_ARCHIVE" +) + +const ( + InvalidAWSACL = "invalid aws acl" + ErrorAWSProfileAndKeys = "you provided AWS profile name and key/secret pair, use profile or keys" + ErrorAWSMissingAuth = "must provide a valid AWS key pair" + ErrorAWSAuthNeeded = "must 
provide AWS profile name or key pair" +) + +// OCI Constants +const ( + OciDefaultProfile = "DEFAULT" +) + +const ( + ErrorOCICompartmentNotSpecified = "OCI compartment will be tenancy root" + ErrorOCIAuthNotSpecified = "OCI auth not specified" + ErrorOCIStorageNotSpecified = "OCI storage tier is not valid" +) + +const ( + OracleStorageTierStandard = "standard" + OracleStorageTierInfrequentAccess = "infrequentaccess" // the case is strange because of the + OracleStorageTierArchive = "archive" +) + +// Linode Constants +const ( + LinodeClusterAmsterdam = "nl-ams-1.linodeobjects.com" + LinodeClusterAtlanta = "us-southeast-1.linodeobjects.com" + LinodeClusterChennai = "in-maa-1.linodeobjects.com" + LinodeClusterChicago = "us-ord-1.linodeobjects.com" + LinodeClusterFrankfurt = "eu-central-1.linodeobjects.com" + LinodeClusterJakarta = "id-cgk-1.linodeobjects.com" + LinodeClusterLosAngeles = "us-lax-1.linodeobjects.com" + LinodeClusterMiami = "us-mia-1.linodeobjects.com" + LinodeClusterMilan = "it-mil-1.linodeobjects.com" + LinodeClusterNewark = "us-east-1.linodeobjects.com" + LinodeClusterOsaka = "jp-osa-1.linodeobjects.com" + LinodeClusterParis = "fr-par-1.linodeobjects.com" + LinodeClusterSaoPaulo = "br-gru-1.linodeobjects.com" + LinodeClusterSeattle = "us-sea-1.linodeobjects.com" + LinodeClusterSingapore = "ap-south-1.linodeobjects.com" + LinodeClusterStockholm = "se-sto-1.linodeobjects.com" + LinodeClusterAshburn = "us-iad-1.linodeobjects.com" +) + +const ( + LinodeRegionAmsterdam = "nl-ams-1" + LinodeRegionAtlanta = "us-southeast-1" + LinodeRegionChennai = "in-maa-1" + LinodeRegionChicago = "us-ord-1" + LinodeRegionFrankfurt = "eu-central-1" + LinodeRegionJakarta = "id-cgk-1" + LinodeRegionLosAngeles = "us-lax-1" + LinodeRegionMiami = "us-mia-1" + LinodeRegionMilan = "it-mil-1" + LinodeRegionNewark = "us-east-1" + LinodeRegionOsaka = "jp-osa-1" + LinodeRegionParis = "fr-par-1" + LinodeRegionSaoPaulo = "br-gru-1" + LinodeRegionSeattle = "us-sea-1" + LinodeRegionSingapore = "ap-south-1" + LinodeRegionStockholm = "se-sto-1" + LinodeRegionAshburn = "us-iad-1" +) + +const ( + LinodeInvalidRegion = "invalid Linode region provided" + LinodeAuthNeeded = "Linode authentication not specified" +) + +// Conf Errors +const ( + InvalidNamingType = "NamingType should be \"relative\" or \"absolute\"" + InvalidStorageClass = "invalid storage class" + InvalidOverwriteMethod = "invalid overwrite method" + InvalidTagChars = "invalid characters removed from tag" + + ErrorProfilePath = "error determining profile path" + ErrorOpeningProfile = "error opening profile" + ErrorReadingYaml = "error reading yaml" + + ErrorLoggingFilepathNotSpecified = "path to log file not specified" + ErrorLoggingFilepath = "error determining log file path" + ErrorLoggingLevelTooHigh = "logging level too high, setting to 5" + ErrorLoggingLevelTooLow = "logging level too low, setting to 0" + ErrorGettingFileInfo = "error getting file info" + ErrorFileIsDirectory = "listed file is a directory" + ErrorNoFilesSpecified = "no files or directories specified" + ErrorNoReadableFiles = "no readable files or directories specified" + ErrorUnsupportedProfileVersion = "profile version not supported" + ErrorProviderNotSpecified = "no valid provider specified" + ErrorBucketInfoNotSpecified = "bucket name or bucket region not specified" +) diff --git a/conf/helpers.go b/conf/helpers.go index 32bc0b4..e0f9ff7 100644 --- a/conf/helpers.go +++ b/conf/helpers.go @@ -1,46 +1,65 @@ package conf import ( + "fmt" + "os" + "path/filepath" "regexp" "strings" 
- "unicode" ) -func formatPath(p string) string { - p = strings.TrimPrefix(p, "/") - // Trimming ending slash if exists - p = strings.TrimSuffix(p, "/") - return p -} +func canCreate(path string) (bool, error) { + + filename := expandHome(path) + + filename, err := filepath.Abs(filename) + if err != nil { + return false, err + } + + // Resolve G304: Potential file inclusion via variable + if strings.Contains(filename, "..") { + return false, fmt.Errorf("invalid filename: %s", filename) + } + + _, err = os.Stat(filename) + if err != nil { + if os.IsNotExist(err) { + return true, nil + } + return false, err + } + + return false, fmt.Errorf("file %s already exists", filename) -func alphaNumericString(s string) string { - reg := regexp.MustCompile("[^a-zA-Z0-9]+") - return reg.ReplaceAllString(s, "") } -func capitalize(s string) string { - for i, v := range s { - return string(unicode.ToTitle(v)) + strings.ToLower(s[i+1:]) +func expandHome(path string) string { + if strings.HasPrefix(path, "~/") { + home, err := os.UserHomeDir() + if err != nil { + fmt.Println(err) + } + return strings.Replace(path, "~", home, 1) } - return Empty + return path +} + +func sanitizeString(s string) string { + reg := regexp.MustCompile("[^a-zA-Z0-9-+_]+") + return reg.ReplaceAllString(s, "") } +// tidyString takes a string and performs two operations on it: trimming any leading/trailing whitespace and converting it to lowercase. +// It then returns the resulting modified string. func tidyString(s string) string { - s = strings.TrimSpace(s) - s = strings.ToLower(s) - return s + return strings.TrimSpace(strings.ToLower(s)) } -func whichProvider(s string) ProviderName { - s = tidyString(s) - switch s { - case "aws", "amazon", "s3", "amazon s3": - return ProviderNameAWS - case "oci", "oracle", "oraclecloud", "oracle cloud", "oracle cloud infrastructure": - return ProviderNameOCI - case "akamai", "linode": - return ProviderNameAkamai - default: - return ProviderNameNone - } +func tidyUpString(s string) string { + return strings.TrimSpace(strings.ToUpper(s)) +} + +func trimPaths(path string) string { + return strings.TrimRight(path, "/") } diff --git a/conf/provider_akamai.go b/conf/provider_akamai.go deleted file mode 100644 index 3a7de9f..0000000 --- a/conf/provider_akamai.go +++ /dev/null @@ -1,79 +0,0 @@ -package conf - -import ( - "errors" - "strings" -) - -const ( - AkamaiClusterAmsterdam = "nl-ams-1.linodeobjects.com" - AkamaiClusterAtlanta = "us-southeast-1.linodeobjects.com" - AkamaiClusterChennai = "in-maa-1.linodeobjects.com" - AkamaiClusterChicago = "us-ord-1.linodeobjects.com" - AkamaiClusterFrankfurt = "eu-central-1.linodeobjects.com" - AkamaiClusterJakarta = "id-cgk-1.linodeobjects.com" - AkamaiClusterLosAngeles = "us-lax-1.linodeobjects.com" - AkamaiClusterMiami = "us-mia-1.linodeobjects.com" - AkamaiClusterMilan = "it-mil-1.linodeobjects.com" - AkamaiClusterNewark = "us-east-1.linodeobjects.com" - AkamaiClusterOsaka = "jp-osa-1.linodeobjects.com" - AkamaiClusterParis = "fr-par-1.linodeobjects.com" - AkamaiClusterSaoPaulo = "br-gru-1.linodeobjects.com" - AkamaiClusterSeattle = "us-sea-1.linodeobjects.com" - AkamaiClusterSingapore = "ap-south-1.linodeobjects.com" - AkamaiClusterStockholm = "se-sto-1.linodeobjects.com" - AkamaiClusterAshburn = "us-iad-1.linodeobjects.com" -) - -const ( - AkamaiRegionAmsterdam = "nl-ams-1" - AkamaiRegionAtlanta = "us-southeast-1" - AkamaiRegionChennai = "in-maa-1" - AkamaiRegionChicago = "us-ord-1" - AkamaiRegionFrankfurt = "eu-central-1" - AkamaiRegionJakarta = "id-cgk-1" 
- AkamaiRegionLosAngeles = "us-lax-1" - AkamaiRegionMiami = "us-mia-1" - AkamaiRegionMilan = "it-mil-1" - AkamaiRegionNewark = "us-east-1" - AkamaiRegionOsaka = "jp-osa-1" - AkamaiRegionParis = "fr-par-1" - AkamaiRegionSaoPaulo = "br-gru-1" - AkamaiRegionSeattle = "us-sea-1" - AkamaiRegionSingapore = "ap-south-1" - AkamaiRegionStockholm = "se-sto-1" - AkamaiRegionAshburn = "us-iad-1" -) - -func akamaiMatchRegion(region string) (endpoint string, err error) { - region = strings.ToLower(strings.TrimSpace(region)) - akamaiEndpointsMap := map[string]string{ - AkamaiRegionAmsterdam: AkamaiClusterAmsterdam, - AkamaiRegionAtlanta: AkamaiClusterAtlanta, - AkamaiRegionChennai: AkamaiClusterChennai, - AkamaiRegionChicago: AkamaiClusterChicago, - AkamaiRegionFrankfurt: AkamaiClusterFrankfurt, - AkamaiRegionJakarta: AkamaiClusterJakarta, - AkamaiRegionLosAngeles: AkamaiClusterLosAngeles, - AkamaiRegionMiami: AkamaiClusterMiami, - AkamaiRegionMilan: AkamaiClusterMilan, - AkamaiRegionNewark: AkamaiClusterNewark, - AkamaiRegionOsaka: AkamaiClusterOsaka, - AkamaiRegionParis: AkamaiClusterParis, - AkamaiRegionSaoPaulo: AkamaiClusterSaoPaulo, - AkamaiRegionSeattle: AkamaiClusterSeattle, - AkamaiRegionSingapore: AkamaiClusterSingapore, - AkamaiRegionStockholm: AkamaiClusterStockholm, - AkamaiRegionAshburn: AkamaiClusterAshburn, - } - - endpoint, ok := akamaiEndpointsMap[region] - if !ok { - return AkamaiClusterAshburn, errors.New(S("invalid akamai region: %q", region)) - } - return -} - -const ( - ErrorAkamaiKeyOrSecretNotSpecified = "akamai access keys not specified" -) diff --git a/conf/provider_aws.go b/conf/provider_aws.go index 04b0ea4..1f933fd 100644 --- a/conf/provider_aws.go +++ b/conf/provider_aws.go @@ -1,25 +1,50 @@ package conf import ( - "errors" - "strings" + "fmt" "github.com/aws/aws-sdk-go-v2/service/s3/types" ) -const ( - AwsACLPrivate = "private" - AwsACLPublicRead = "public-read" - AwsACLPublicReadWrite = "public-read-write" - AwsACLAuthenticatedRead = "authenticated-read" - AwsACLAwsExecRead = "aws-exec-read" - AwsACLBucketOwnerRead = "bucket-owner-read" - AwsACLBucketOwnerFullControl = "bucket-owner-full-control" -) +// ProviderAWS represents the AWS provider configuration. +// +// Fields: +// - Profile: The profile name used for authentication. +// - ACL: The access control list for the storage objects. +// - Storage: The storage class for the objects. +// - Key: The AWS access key ID. +// - Secret: The AWS secret access key. +type ProviderAWS struct { + Profile string + Key string + Secret string + ACL types.ObjectCannedACL + Storage types.StorageClass +} + +func (aws *ProviderAWS) build(inc *ProfileIncoming) error { + + err := aws.matchACL(inc.AWS.ACL) + if err != nil { + return err + } + + err = aws.matchStorage(inc.AWS.Storage) + if err != nil { + return err + } + + aws.Key = inc.Provider.Key + aws.Secret = inc.Provider.Secret + aws.Profile = inc.Provider.Profile + + return aws.validate() + +} // awsMatchACL will match the ACL string to the AWS ACL type. The constant values above are used to match the string. 
-func awsMatchACL(acl string) (cAcl types.ObjectCannedACL, err error) { - acl = strings.ToLower(strings.TrimSpace(acl)) +func (aws *ProviderAWS) matchACL(acl string) error { + awsCannedACLs := map[string]types.ObjectCannedACL{ AwsACLPrivate: types.ObjectCannedACLPrivate, AwsACLPublicRead: types.ObjectCannedACLPublicRead, @@ -30,29 +55,21 @@ func awsMatchACL(acl string) (cAcl types.ObjectCannedACL, err error) { AwsACLBucketOwnerFullControl: types.ObjectCannedACLBucketOwnerFullControl, } - cAcl, ok := awsCannedACLs[acl] + validAcl, ok := awsCannedACLs[tidyString(acl)] if !ok { - return types.ObjectCannedACLPrivate, errors.New(S("%s %q", InvalidAWSACL, acl)) + aws.ACL = types.ObjectCannedACLPrivate + return fmt.Errorf("%s %q", InvalidAWSACL, acl) } - return cAcl, nil -} + aws.ACL = validAcl -const ( - AwsClassStandard = "STANDARD" - AwsClassReducedRedundancy = "REDUCED_REDUNDANCY" - AwsClassGlacierIR = "GLACIER_IR" - AwsClassSnow = "SNOW" - AwsClassStandardIA = "STANDARD_IA" - AwsClassOneZoneIA = "ONEZONE_IA" - AwsClassIntelligentTiering = "INTELLIGENT_TIERING" - AwsClassGlacier = "GLACIER" - AwsClassDeepArchive = "DEEP_ARCHIVE" -) + return nil + +} // awsMatchStorage will match the storage class string to the AWS storage class type. The constant values above are // used to match the string. -func awsMatchStorage(class string) (sClass types.StorageClass, err error) { - class = strings.ToUpper(strings.TrimSpace(class)) +func (aws *ProviderAWS) matchStorage(class string) error { + awsStorageClasses := map[string]types.StorageClass{ AwsClassStandard: types.StorageClassStandard, AwsClassReducedRedundancy: types.StorageClassReducedRedundancy, @@ -65,15 +82,29 @@ func awsMatchStorage(class string) (sClass types.StorageClass, err error) { AwsClassSnow: types.StorageClassGlacier, } - sClass, ok := awsStorageClasses[class] + validClass, ok := awsStorageClasses[tidyUpString(class)] if !ok { - return types.StorageClassStandard, errors.New(S("%s %q", InvalidStorageClass, class)) + aws.Storage = types.StorageClassStandard + return fmt.Errorf("%s %q", InvalidStorageClass, class) } - return sClass, nil + aws.Storage = validClass + + return nil + } -const ( - InvalidAWSACL = "invalid aws acl" - ErrorAWSProfileAndKeys = "both aws profile and keys are specified, use profile or keys" - ErrorAWSKeyOrSecretNotSpecified = "profile should specified both key and secret" -) +func (aws *ProviderAWS) validate() error { + + if aws.Profile != Empty && (aws.Key != Empty || aws.Secret != Empty) { + return fmt.Errorf("bad AWS config: %v", ErrorAWSProfileAndKeys) + } + if aws.Profile == Empty && (aws.Key == Empty || aws.Secret == Empty) { + return fmt.Errorf("bad AWS config: %v", ErrorAWSMissingAuth) + } + if aws.Profile == Empty && aws.Key == Empty && aws.Secret == Empty { + return fmt.Errorf("bad AWS config: %v", ErrorAWSAuthNeeded) + } + + return nil + +} diff --git a/conf/provider_linode.go b/conf/provider_linode.go new file mode 100644 index 0000000..41eb9f9 --- /dev/null +++ b/conf/provider_linode.go @@ -0,0 +1,69 @@ +package conf + +import ( + "fmt" +) + +// ProviderLinode represents the Linode/Akamai provider configuration +type ProviderLinode struct { + Key string + Secret string + Endpoint string +} + +func (l *ProviderLinode) build(inc *ProfileIncoming) error { + + err := l.matchRegion(inc.Linode.Region) + if err != nil { + return err + } + + l.Key = inc.Provider.Key + l.Secret = inc.Provider.Secret + + return l.validate() + +} + +func (l *ProviderLinode) matchRegion(region string) error { + + linodeEndpointsMap 
:= map[string]string{ + LinodeRegionAmsterdam: LinodeClusterAmsterdam, + LinodeRegionAtlanta: LinodeClusterAtlanta, + LinodeRegionChennai: LinodeClusterChennai, + LinodeRegionChicago: LinodeClusterChicago, + LinodeRegionFrankfurt: LinodeClusterFrankfurt, + LinodeRegionJakarta: LinodeClusterJakarta, + LinodeRegionLosAngeles: LinodeClusterLosAngeles, + LinodeRegionMiami: LinodeClusterMiami, + LinodeRegionMilan: LinodeClusterMilan, + LinodeRegionNewark: LinodeClusterNewark, + LinodeRegionOsaka: LinodeClusterOsaka, + LinodeRegionParis: LinodeClusterParis, + LinodeRegionSaoPaulo: LinodeClusterSaoPaulo, + LinodeRegionSeattle: LinodeClusterSeattle, + LinodeRegionSingapore: LinodeClusterSingapore, + LinodeRegionStockholm: LinodeClusterStockholm, + LinodeRegionAshburn: LinodeClusterAshburn, + } + + endpoint, ok := linodeEndpointsMap[tidyString(region)] + if !ok { + l.Endpoint = LinodeClusterAshburn + return fmt.Errorf("%s, %q", LinodeInvalidRegion, region) + } + l.Endpoint = endpoint + + return nil + +} + +func (l *ProviderLinode) validate() error { + + if l.Secret == Empty || l.Key == Empty { + return fmt.Errorf("bad Linode config: %v", LinodeAuthNeeded) + } + + return nil + +} diff --git a/conf/provider_oci.go b/conf/provider_oci.go index 0fb70ab..043e6c6 100644 --- a/conf/provider_oci.go +++ b/conf/provider_oci.go @@ -1,48 +1,78 @@ package conf import ( - "errors" "fmt" "strings" "github.com/oracle/oci-go-sdk/v49/objectstorage" ) -const ( - OciDefaultProfile = "DEFAULT" -) +// ProviderOCI represents the OCI provider configuration. +type ProviderOCI struct { + Profile string + Compartment string + Storage objectstorage.StorageTierEnum -const ( - ErrorOCICompartmentNotSpecified = "oracle cloud compartment will be tenancy root" - ErrorOCIAuthNotSpecified = "oracle cloud auth not specified" - ErrorOCIStorageNotSpecified = "oracle cloud storage tier is not valid" -) + PutStorage objectstorage.PutObjectStorageTierEnum +} -const ( - OracleStorageTierStandard = "standard" - OracleStorageTierInfrequentAccess = "infrequentaccess" // the case is strange because of the - OracleStorageTierArchive = "archive" -) +func (oci *ProviderOCI) build(inc *ProfileIncoming) error { + + err := oci.matchStorage(inc.OCI.Storage) + if err != nil { + return err + } + + oci.Profile = inc.Provider.Profile + if tidyUpString(oci.Profile) == OciDefaultProfile { + oci.Profile = OciDefaultProfile + } + + oci.Compartment = inc.OCI.Compartment + + return oci.validate() + +} // ociMatchStorage will match the Storage string to the OCI Storage Tier type. // The constant values above are used to match the string. 
-func ociMatchStorage(tier string) (ociTier objectstorage.StorageTierEnum, putTier objectstorage.PutObjectStorageTierEnum, err error) { +func (oci *ProviderOCI) matchStorage(tier string) error { + tier = strings.ToLower(strings.TrimSpace(tier)) + ociStorageTiersMap := map[string]objectstorage.StorageTierEnum{ OracleStorageTierStandard: objectstorage.StorageTierStandard, OracleStorageTierInfrequentAccess: objectstorage.StorageTierInfrequentAccess, OracleStorageTierArchive: objectstorage.StorageTierArchive, } + ociPutStorageTiersMap := map[string]objectstorage.PutObjectStorageTierEnum{ OracleStorageTierStandard: objectstorage.PutObjectStorageTierStandard, OracleStorageTierInfrequentAccess: objectstorage.PutObjectStorageTierInfrequentaccess, OracleStorageTierArchive: objectstorage.PutObjectStorageTierArchive, } - ociTier, ok := ociStorageTiersMap[tier] - putTier, _ = ociPutStorageTiersMap[tier] + storeTier, ok := ociStorageTiersMap[tier] if !ok { - return objectstorage.StorageTierStandard, objectstorage.PutObjectStorageTierStandard, errors.New(fmt.Sprintf("%s %q", ErrorOCIStorageNotSpecified, tier)) + oci.Storage = objectstorage.StorageTierStandard + oci.PutStorage = objectstorage.PutObjectStorageTierStandard + return fmt.Errorf("%s %q", ErrorOCIStorageNotSpecified, tier) + } + + putTier, _ := ociPutStorageTiersMap[tier] + + oci.Storage = storeTier + oci.PutStorage = putTier + + return nil + +} + +func (oci *ProviderOCI) validate() error { + + if oci.Profile == Empty { + return fmt.Errorf("bad OCI configuration: %v", ErrorOCIAuthNotSpecified) } - return ociTier, putTier, nil + return nil + } diff --git a/conf/readconfig.go b/conf/readconfig.go deleted file mode 100644 index 1748df8..0000000 --- a/conf/readconfig.go +++ /dev/null @@ -1,281 +0,0 @@ -package conf - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/orme292/s3packer/logbot" - "gopkg.in/yaml.v3" -) - -// loadProfile() reads the profile file and returns a readConfig struct. The only validated fields are the logging fields, -// version, and the files and directories. The rest of the fields are left as they are until each individual -// method is called. -func (rc *readConfig) loadProfile(file string) (err error) { - rc.Logging.Console = true - rc.Logging.File = false - rc.Logging.Level = int(logbot.WARN) - - file, err = filepath.Abs(file) - if err != nil { - return errors.New(S("%s: %s", ErrorProfilePath, err.Error())) - } - - f, err := os.ReadFile(filepath.Clean(file)) - if err != nil { - return errors.New(S("%s: %s", ErrorOpeningProfile, err.Error())) - } - - err = yaml.Unmarshal(f, &rc) - if err != nil { - return errors.New(S("%s: %s", ErrorReadingYaml, err.Error())) - } - - err = rc.validateLogging() - rc.Log = &logbot.LogBot{ - Level: logbot.ParseIntLevel(rc.Logging.Level), - FlagConsole: rc.Logging.Console, - FlagFile: rc.Logging.File, - Path: rc.Logging.Filepath, - } - if err != nil { - rc.Log.Warn(err.Error()) - } - - err = rc.validateFiles() - if err != nil { - return err - } - return nil -} - -// transposeStructBucket() returns a Bucket struct. If the bucket name or region is not specified, an error is returned. -// Create is not implemented, so its value doesn't matter. 
-func (rc *readConfig) transposeStructBucket() (b *Bucket, err error) { - if rc.Bucket.Name == Empty || rc.Bucket.Region == Empty { - return nil, errors.New(ErrorBucketNotSpecified) - } - return &Bucket{ - Create: rc.Bucket.Create, - Name: rc.Bucket.Name, - Region: rc.Bucket.Region, - }, nil -} - -// transposeStructFileTargets() returns a slice of files and directories to be uploaded. If no files or directories are specified, -// an error is returned. -// Directories and Folders slices are consolidated here, since they are just two different names for the same thing. -// TODO: Check files and dirs for duplicate entries. -// TODO: Add support for globs. -func (rc *readConfig) transposeStructFileTargets() (files, dirs []string, err error) { - for _, file := range rc.Uploads.Files { - s, err := os.Stat(file) - if err != nil { - rc.Log.Warn("%s: %q", ErrorGettingFileInfo, file) - } else { - if s.IsDir() == true { - dirs = append(dirs, strings.TrimRight(file, "/")) - rc.Log.Warn("%s: %q", ErrorFileIsDirectory, file) - } else { - files = append(files, file) - } - } - } - for _, folder := range rc.Uploads.Folders { - dirs = append(dirs, strings.TrimRight(folder, "/")) - } - for _, dir := range rc.Uploads.Directories { - dirs = append(dirs, strings.TrimRight(dir, "/")) - } - if len(files) == 0 && len(dirs) == 0 { - return nil, nil, errors.New(ErrorNoReadableFiles) - } - return -} - -// transposeStructLogging() returns a LogOpts struct. If the logging file is enabled and the path is specified, then the -// path is converted to an absolute path. Any actual validation is handled elsewhere. -func (rc *readConfig) transposeStructLogging() (lo *LogOpts, err error) { - var abs string - if rc.Logging.File == true && rc.Logging.Filepath != Empty { - abs, err = filepath.Abs(filepath.Clean(rc.Logging.Filepath)) - if err != nil { - return nil, errors.New(S("%s: %s", ErrorLoggingFilepath, err.Error())) - } - } - - return &LogOpts{ - Level: logbot.ParseIntLevel(rc.Logging.Level), - Console: rc.Logging.Console, - File: rc.Logging.File, - Filepath: abs, - }, nil -} - -// transposeStructObjects() returns an Objects struct. If the naming method is not specified, then the default is used, but -// an error is returned. -func (rc *readConfig) transposeStructObjects() (o *Objects, err error) { - var method Naming - switch strings.ToLower(strings.Trim(rc.Objects.Naming, " ")) { - case NamingAbsolute.String(): - method = NamingAbsolute - case NamingRelative.String(): - method = NamingRelative - default: - method = NamingAbsolute - err = errors.New(InvalidNamingMethod) - } - return &Objects{ - NamePrefix: strings.TrimPrefix(rc.Objects.NamePrefix, "/"), - RootPrefix: formatPath(rc.Objects.RootPrefix), - Naming: method, - OmitRootDir: rc.Objects.OmitRootDir, - }, err -} - -// transposeStructOpts() returns an Opts struct. If the overwrite method is not specified, then the default is used, and the -// default should always be to never overwrite an object. OverwriteChecksum support is not implemented. A MaxParts -// value greater or less than 1 is not supported. 
-func (rc *readConfig) transposeStructOpts() (opts *Opts, err error) { - overwrite := OverwriteNever - - switch tidyString(rc.Options.Overwrite) { - case OverwriteAlways.String(): - overwrite = OverwriteAlways - case OverwriteNever.String(): - overwrite = OverwriteNever - default: - err = errors.New(InvalidOverwriteMethod) - } - - return &Opts{ - MaxParts: rc.Options.MaxParts, - MaxUploads: rc.Options.MaxUploads, - Overwrite: overwrite, - }, err -} - -// transposeStructProvider() returns a Provider struct. If the provider is not specified, then an error is returned. -// There is only support for a single provider right now, so there's no real complexity here. -func (rc *readConfig) transposeStructProvider() (p *Provider, err error) { - switch whichProvider(rc.Provider) { - case ProviderNameAWS: - err = rc.validateProviderAWS() - if err != nil { - return nil, err - } - return rc.buildProviderAWS(), err - case ProviderNameOCI: - err = rc.validateProviderOCI() - if err != nil { - return nil, err - } - return rc.buildProviderOCI(), err - case ProviderNameAkamai: - err = rc.validateProviderAkamai() - if err != nil { - return nil, err - } - return rc.buildProviderAkamai(), err - default: - return &Provider{Is: ProviderNameNone}, errors.New(ErrorProviderNotSpecified) - } -} - -func (rc *readConfig) buildProviderAWS() (p *Provider) { - acl, err := awsMatchACL(rc.AWS.ACL) - if err != nil { - rc.Log.Warn(err.Error()) - } - - class, err := awsMatchStorage(rc.AWS.Storage) - if err != nil { - rc.Log.Warn(err.Error()) - } - - return &Provider{ - Is: ProviderNameAWS, - AWS: &ProviderAWS{ - Profile: rc.AWS.Profile, - Key: rc.AWS.Key, - Secret: rc.AWS.Secret, - ACL: acl, - Storage: class, - }, - Key: rc.AWS.Key, - Secret: rc.AWS.Secret, - } -} - -func (rc *readConfig) buildProviderOCI() (p *Provider) { - if strings.TrimSpace(strings.ToUpper(rc.OCI.Profile)) == OciDefaultProfile { - rc.OCI.Profile = OciDefaultProfile - } - - tier, put, err := ociMatchStorage(rc.OCI.Storage) - if err != nil { - rc.Log.Warn(err.Error()) - } - - return &Provider{ - Is: ProviderNameOCI, - OCI: &ProviderOCI{ - Profile: strings.TrimSpace(rc.OCI.Profile), - Compartment: rc.OCI.Compartment, - Storage: tier, - PutStorage: put, - }, - } -} - -func (rc *readConfig) buildProviderAkamai() (p *Provider) { - if strings.ToLower(strings.TrimSpace(rc.Bucket.Region)) == Empty { - rc.Bucket.Region = AkamaiRegionAshburn - } - endpoint, err := akamaiMatchRegion(rc.Bucket.Region) - if err != nil { - rc.Log.Warn(err.Error()) - } - - return &Provider{ - Is: ProviderNameAkamai, - Akamai: &ProviderAkamai{ - Key: rc.Akamai.Key, - Secret: rc.Akamai.Secret, - Endpoint: endpoint, - }, - } -} - -// transposeStructTagOpts() returns a TagOpts struct. -func (rc *readConfig) transposeStructTagOpts() (t *TagOpts, err error) { - return &TagOpts{ - ChecksumSHA256: rc.Tagging.ChecksumSHA256, - AwsChecksumAlgorithm: types.ChecksumAlgorithmSha256, - AwsChecksumMode: types.ChecksumModeEnabled, - Origins: rc.Tagging.Origins, - }, nil -} - -// transposeStructTags() returns a map of valid string tags. This is both a get and validate method. It removes -// unsupported symbols from the tag key/values specified in the profile. Changes are logged as Warnings, but they -// don't halt execution. 
-func (rc *readConfig) transposeStructTags() (tags map[string]string, err error) { - tags = make(map[string]string) - for k, v := range rc.Tagging.Tags { - newKey := alphaNumericString(k) - newValue := alphaNumericString(v) - if newKey != k { - rc.Log.Warn(fmt.Sprintf("%s: %q is now %q", InvalidTagChars, k, newKey)) - } - if newValue != v { - rc.Log.Warn(fmt.Sprintf("%s: %q is now %q", InvalidTagChars, v, newValue)) - } - tags[newKey] = newValue - } - return tags, nil -} diff --git a/conf/type_appconfig.go b/conf/type_appconfig.go new file mode 100644 index 0000000..f9ad56e --- /dev/null +++ b/conf/type_appconfig.go @@ -0,0 +1,123 @@ +package conf + +import ( + "fmt" + + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/orme292/s3packer/logbot" + "github.com/rs/zerolog" +) + +type AppConfig struct { + Provider *Provider + Opts *Opts + Bucket *Bucket + Objects *Objects + TagOpts *TagOpts + Tags Tags + LogOpts *LogOpts + Paths []string + Files []string + Dirs []string + + Log *logbot.LogBot +} + +// NewAppConfig returns a new AppConfig object with preconfigured defaults. +func NewAppConfig() *AppConfig { + + return &AppConfig{ + Provider: &Provider{ + Is: ProviderNameNone, + AWS: &ProviderAWS{}, + OCI: &ProviderOCI{}, + Linode: &ProviderLinode{}, + }, + Opts: &Opts{ + MaxParts: 10, + MaxUploads: 5, + Overwrite: OverwriteNever, + }, + Bucket: &Bucket{ + Create: false, + }, + Objects: &Objects{ + NamingType: NamingNone, + }, + Tags: make(Tags), + TagOpts: &TagOpts{ + ChecksumSHA256: true, + AwsChecksumAlgorithm: types.ChecksumAlgorithmSha256, + AwsChecksumMode: types.ChecksumModeEnabled, + OriginPath: true, + }, + LogOpts: &LogOpts{ + Level: zerolog.ErrorLevel, + Console: true, + File: false, + Filepath: "/var/log/s3packer.log", + }, + Log: &logbot.LogBot{ + Level: zerolog.ErrorLevel, + FlagConsole: true, + FlagFile: false, + Path: "/var/log/s3packer.log", + }, + } + +} + +func (ac *AppConfig) ImportFromProfile(inc *ProfileIncoming) error { + + var err error + + err = ac.LogOpts.build(inc) + if err != nil { + return err + } + + ac.Log.Level = ac.LogOpts.Level + ac.Log.FlagConsole = ac.LogOpts.Console + ac.Log.FlagFile = ac.LogOpts.File + ac.Log.Path = ac.LogOpts.Filepath + + err = ac.Provider.build(inc) + if err != nil { + return err + } + + err = ac.Opts.build(inc) + if err != nil { + return err + } + + err = ac.Bucket.build(inc, ac.Provider.Is) + if err != nil { + return err + } + + err = ac.Objects.build(inc) + if err != nil { + return err + } + + err = ac.Tags.build(inc.Tags) + if err != nil { + return err + } + + err = ac.TagOpts.build(inc) + if err != nil { + return err + } + + if len(inc.Files) == 0 && len(inc.Dirs) == 0 { + return fmt.Errorf("bad profile config: %s", ErrorNoFilesSpecified) + } + + ac.Files = inc.Files + ac.Dirs = inc.Dirs + + return nil + +} diff --git a/conf/type_bucket.go b/conf/type_bucket.go new file mode 100644 index 0000000..48d2bff --- /dev/null +++ b/conf/type_bucket.go @@ -0,0 +1,34 @@ +package conf + +import ( + "fmt" +) + +// Bucket contains all details related to the bucket, for any provider. Create is not implemented. 
+type Bucket struct { + Create bool + Name string + Region string +} + +func (b *Bucket) build(inc *ProfileIncoming, pn ProviderName) error { + + b.Name = inc.Bucket.Name + b.Create = inc.Bucket.Create + b.Region = inc.Bucket.Region + + return b.validate(pn) +} + +func (b *Bucket) validate(pn ProviderName) error { + + if b.Name == Empty { + return fmt.Errorf("bad bucket config: %v", ErrorBucketInfoNotSpecified) + } + if b.Region == Empty && (pn == ProviderNameAWS || pn == ProviderNameLinode) { + return fmt.Errorf("bad bucket config: %v", ErrorBucketInfoNotSpecified) + } + + return nil + +} diff --git a/conf/type_logopts.go b/conf/type_logopts.go new file mode 100644 index 0000000..d03386b --- /dev/null +++ b/conf/type_logopts.go @@ -0,0 +1,36 @@ +package conf + +import ( + "fmt" + + "github.com/orme292/s3packer/logbot" + "github.com/rs/zerolog" +) + +// LogOpts contains the logging configuration, but not an instance of logbot. +type LogOpts struct { + Level zerolog.Level + Console bool + File bool + Filepath string +} + +func (lo *LogOpts) build(inc *ProfileIncoming) error { + + lo.Level = logbot.ParseIntLevel(inc.Logging.Level) + lo.Console = inc.Logging.OutputToConsole + lo.File = inc.Logging.OutputToFile + lo.Filepath = inc.Logging.Path + + return lo.validate() + +} + +func (lo *LogOpts) validate() error { + + if lo.File && lo.Filepath == Empty { + return fmt.Errorf("bad logging config: %s", ErrorLoggingFilepathNotSpecified) + } + return nil + +} diff --git a/conf/type_objects.go b/conf/type_objects.go new file mode 100644 index 0000000..9adbafc --- /dev/null +++ b/conf/type_objects.go @@ -0,0 +1,64 @@ +package conf + +import ( + "fmt" + "strings" +) + +// Naming type is a string enum of the supported object naming methods. +type Naming string + +const ( + NamingRelative Naming = "relative" + NamingAbsolute Naming = "absolute" + NamingNone Naming = "none" +) + +// String returns the string representation of the Naming object. +// It converts the Naming object to a string by using the underlying string value. +func (n Naming) String() string { + return string(n) +} + +// Objects contain the object naming configuration. +type Objects struct { + NamingType Naming + NamePrefix string + PathPrefix string + + // OmitRootDir is used to remove the root directory name from the object's final FormattedKey. + OmitRootDir bool +} + +func (o *Objects) build(inc *ProfileIncoming) error { + switch tidyString(inc.Objects.NamingType) { + + case NamingAbsolute.String(), "abs": + o.NamingType = NamingAbsolute + + case NamingRelative.String(), "rel": + o.NamingType = NamingRelative + + default: + o.NamingType = NamingNone + + } + + o.NamePrefix = strings.TrimPrefix(inc.Objects.NamePrefix, "/") + o.PathPrefix = strings.TrimPrefix(inc.Objects.PathPrefix, "/") + o.PathPrefix = strings.TrimSuffix(inc.Objects.PathPrefix, "/") + o.OmitRootDir = inc.Objects.OmitRootDir + + return o.validate() + +} + +func (o *Objects) validate() error { + + if o.NamingType == NamingNone { + return fmt.Errorf("bad objects config: %v", InvalidNamingType) + } + + return nil + +} diff --git a/conf/type_opts.go b/conf/type_opts.go new file mode 100644 index 0000000..c860c36 --- /dev/null +++ b/conf/type_opts.go @@ -0,0 +1,62 @@ +package conf + +import ( + "fmt" +) + +// Overwrite type is a string enum of the supported overwrite methods. OverwriteChecksum is not implemented. +// Overwrite.String() will return the string representation of the enum for convenience, either in output or logging. 
+type Overwrite string
+
+const (
+	OverwriteChecksum Overwrite = "checksum"
+	OverwriteNever    Overwrite = "never"
+	OverwriteAlways   Overwrite = "always"
+)
+
+func (o Overwrite) String() string {
+	return string(o)
+}
+
+// Opts contains application level configuration options.
+type Opts struct {
+	MaxParts   int
+	MaxUploads int
+	Overwrite  Overwrite
+}
+
+func (o *Opts) build(inc *ProfileIncoming) error {
+
+	// keep the preconfigured defaults unless the profile overrides them
+	if inc.Options.MaxParts > 0 {
+		o.MaxParts = inc.Options.MaxParts
+	}
+	if inc.Options.MaxUploads > 0 {
+		o.MaxUploads = inc.Options.MaxUploads
+	}
+
+	switch tidyString(inc.Options.OverwriteObjects) {
+
+	case OverwriteAlways.String(), "yes", "true":
+		o.Overwrite = OverwriteAlways
+
+	case OverwriteNever.String(), "no", "false":
+		o.Overwrite = OverwriteNever
+
+	// OverwriteChecksum is not supported yet; an empty value is rejected
+	case Empty:
+		return fmt.Errorf("bad options config: %s", InvalidOverwriteMethod)
+
+	default:
+		o.Overwrite = OverwriteNever
+
+	}
+
+	return o.validate()
+
+}
+
+func (o *Opts) validate() error {
+	if o.MaxParts <= 0 {
+		return fmt.Errorf("MaxParts must be at least 1")
+	}
+	if o.MaxUploads <= 0 {
+		return fmt.Errorf("MaxUploads must be at least 1")
+	}
+	if o.Overwrite != OverwriteChecksum && o.Overwrite != OverwriteNever && o.Overwrite != OverwriteAlways {
+		return fmt.Errorf("OverwriteObjects value should be \"never\" or \"always\": %q", o.Overwrite)
+	}
+	return nil
+}
diff --git a/conf/type_profile.go b/conf/type_profile.go
new file mode 100644
index 0000000..29ca9c0
--- /dev/null
+++ b/conf/type_profile.go
@@ -0,0 +1,146 @@
+package conf
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"gopkg.in/yaml.v3"
+)
+
+type ProfileIncoming struct {
+	Version int `yaml:"Version"`
+
+	Provider struct {
+		Use     string `yaml:"Use"`
+		Profile string `yaml:"Profile"`
+		Key     string `yaml:"Key"`
+		Secret  string `yaml:"Secret"`
+	} `yaml:"Provider"`
+
+	AWS struct {
+		ACL     string `yaml:"ACL"`
+		Storage string `yaml:"Storage"`
+	} `yaml:"AWS"`
+
+	OCI struct {
+		Compartment string `yaml:"Compartment"`
+		Storage     string `yaml:"Storage"`
+	} `yaml:"OCI"`
+
+	Linode struct {
+		Region string `yaml:"Region"`
+	} `yaml:"Linode"`
+
+	Bucket struct {
+		Create bool   `yaml:"Create"`
+		Name   string `yaml:"Name"`
+		Region string `yaml:"Region"`
+	} `yaml:"Bucket"`
+
+	Options struct {
+		MaxParts         int    `yaml:"MaxParts"`
+		MaxUploads       int    `yaml:"MaxUploads"`
+		OverwriteObjects string `yaml:"OverwriteObjects"`
+	} `yaml:"Options"`
+
+	TagOptions struct {
+		OriginPath     bool `yaml:"OriginPath"`
+		ChecksumSHA256 bool `yaml:"ChecksumSHA256"`
+	} `yaml:"Tagging"`
+
+	Tags map[string]string `yaml:"Tags"`
+
+	Objects struct {
+		NamingType  string `yaml:"NamingType"`
+		NamePrefix  string `yaml:"NamePrefix"`
+		PathPrefix  string `yaml:"PathPrefix"`
+		OmitRootDir bool   `yaml:"OmitRootDir"`
+	} `yaml:"Objects"`
+
+	Logging struct {
+		Level           int    `yaml:"Level"`
+		OutputToConsole bool   `yaml:"OutputToConsole"`
+		OutputToFile    bool   `yaml:"OutputToFile"`
+		Path            string `yaml:"Path"`
+	} `yaml:"Logging"`
+
+	Files []string `yaml:"Files"`
+	Dirs  []string `yaml:"Dirs"`
+}
+
+func NewProfile() *ProfileIncoming {
+	return &ProfileIncoming{}
+}
+
+func (p *ProfileIncoming) LoadFromYaml(filename string) error {
+	filename, err := filepath.Abs(filename)
+	if err != nil {
+		return fmt.Errorf("%s: %v", ErrorProfilePath, err)
+	}
+
+	f, err := os.ReadFile(filename)
+	if err != nil {
+		return fmt.Errorf("%s: %v", ErrorOpeningProfile, err)
+	}
+
+	err = yaml.Unmarshal(f, p)
+	if err != nil {
+		return fmt.Errorf("%s: %v", ErrorReadingYaml, err)
+	}
+
+	return nil
+}
+
+func (p *ProfileIncoming) loadSampleData() {
+
+	p.Provider.Use = "aws"
+	p.Provider.Profile = "myAwsProfile"
+	p.Provider.Key = "key_value"
+	p.Provider.Secret = "secret_value"
+
+	p.AWS.ACL = "private"
+	p.AWS.Storage = "intelligent_tiering"
+
+	p.OCI.Compartment = "ocid1.compartment.oc1..aaaaaaaaa2qfwzyec6js1ua2ybtyyh3m39ze"
+	p.OCI.Storage = "standard"
+
+	p.Linode.Region = "us-lax-1"
+
+	p.Bucket.Create = true
+	p.Bucket.Region = "us-lax-1"
+	p.Bucket.Name = "MyBackupBucket"
+
+	p.Options.MaxParts = 10
+	p.Options.MaxUploads = 5
+	p.Options.OverwriteObjects = "never"
+
+	p.TagOptions.OriginPath = true
+	p.TagOptions.ChecksumSHA256 = true
+
+	p.Tags = map[string]string{
+		"Author": "Forrest Gump",
+		"Title":  "Letters to Jenny",
+	}
+
+	p.Objects.NamingType = "relative"
+	p.Objects.NamePrefix = "backup-"
+	p.Objects.PathPrefix = "/backups/april/2023"
+	p.Objects.OmitRootDir = true
+
+	p.Logging.Level = 4
+	p.Logging.OutputToFile = true
+	p.Logging.OutputToConsole = true
+	p.Logging.Path = "/var/log/s3packer.log"
+
+	p.Files = []string{
+		"/documents/to_jenny/letter_1.doc",
+		"/documents/to_jenny/letter_2.doc",
+		"/documents/to_jenny/letter_3.doc",
+	}
+	p.Dirs = []string{
+		"/documents/from_jenny",
+		"/documents/stock_certificates",
+	}
+
+}
diff --git a/conf/type_provider.go b/conf/type_provider.go
new file mode 100644
index 0000000..7dc0200
--- /dev/null
+++ b/conf/type_provider.go
@@ -0,0 +1,78 @@
+package conf
+
+import (
+	"fmt"
+)
+
+// ProviderName subtype, for quickly matching providers
+type ProviderName string
+
+const (
+	ProviderNameNone   ProviderName = "none"
+	ProviderNameAWS    ProviderName = "aws"
+	ProviderNameOCI    ProviderName = "oci"
+	ProviderNameLinode ProviderName = "linode"
+)
+
+func (pn ProviderName) String() string {
+	return string(pn)
+}
+
+// Provider represents the configuration for a provider.
+//
+// Fields:
+// - Is (ProviderName): The name of the provider. (e.g., "AWS", "OCI")
+// - AWS (*ProviderAWS): The configuration for AWS.
+// - OCI (*ProviderOCI): The configuration for OCI.
+// - Linode (*ProviderLinode): The configuration for Linode.
+//
+// Usage examples can be found in the surrounding code.
+type Provider struct {
+	Is     ProviderName
+	AWS    *ProviderAWS
+	OCI    *ProviderOCI
+	Linode *ProviderLinode
+}
+
+func (p *Provider) build(inc *ProfileIncoming) error {
+
+	p.match(inc.Provider.Use)
+	if p.Is == ProviderNameNone {
+		return fmt.Errorf("error loading profile: %v", ErrorProviderNotSpecified)
+	}
+
+	switch p.Is {
+
+	case ProviderNameAWS:
+		p.AWS = &ProviderAWS{}
+		return p.AWS.build(inc)
+
+	case ProviderNameOCI:
+		p.OCI = &ProviderOCI{}
+		return p.OCI.build(inc)
+
+	case ProviderNameLinode:
+		p.Linode = &ProviderLinode{}
+		return p.Linode.build(inc)
+
+	default:
+		return fmt.Errorf("could not build profile: %v", ErrorProviderNotSpecified)
+
+	}
+
+}
+
+func (p *Provider) match(s string) {
+
+	switch tidyString(s) {
+	case "aws", "amazon", "s3", "amazon s3":
+		p.Is = ProviderNameAWS
+	case "oci", "oracle", "oracle cloud":
+		p.Is = ProviderNameOCI
+	case "akamai", "linode", "linode objects":
+		p.Is = ProviderNameLinode
+	default:
+		p.Is = ProviderNameNone
+	}
+
+}
diff --git a/conf/type_tagopts.go b/conf/type_tagopts.go
new file mode 100644
index 0000000..dc7ca67
--- /dev/null
+++ b/conf/type_tagopts.go
@@ -0,0 +1,34 @@
+package conf
+
+import (
+	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+)
+
+// TagOpts contain the object tagging configuration, but only the ones handled internally by the application.
+// Custom tags are put in a separate map named "Tags" inside the AppConfig struct.
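+// The Aws* fields configure the AWS SDK's checksum handling; build() pins them to SHA256 with checksum mode enabled.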
+type TagOpts struct { + ChecksumSHA256 bool + OriginPath bool + + AwsChecksumAlgorithm types.ChecksumAlgorithm + AwsChecksumMode types.ChecksumMode +} + +func (to *TagOpts) build(inc *ProfileIncoming) error { + + to.OriginPath = inc.TagOptions.OriginPath + to.ChecksumSHA256 = inc.TagOptions.ChecksumSHA256 + to.AwsChecksumAlgorithm = types.ChecksumAlgorithmSha256 + to.AwsChecksumMode = types.ChecksumModeEnabled + + return to.validate() + +} + +func (to *TagOpts) validate() error { + + // nothing to validate yet + return nil + +} diff --git a/conf/type_tags.go b/conf/type_tags.go new file mode 100644 index 0000000..58890cf --- /dev/null +++ b/conf/type_tags.go @@ -0,0 +1,33 @@ +package conf + +import ( + "fmt" +) + +type Tags map[string]string + +func (t *Tags) Get() map[string]string { + return *t +} + +func (t *Tags) build(tags map[string]string) error { + + for k, v := range tags { + (*t)[sanitizeString(k)] = sanitizeString(v) + } + + return t.validate() + +} + +func (t *Tags) validate() error { + + for key := range *t { + if tidyString(key) == "s3p-checksum-sha256" || tidyString(key) == "s3p-origin-path" { + return fmt.Errorf("reserved tag '%s' cannot be used", key) + } + } + + return nil + +} diff --git a/conf/types.go b/conf/types.go deleted file mode 100644 index ad93139..0000000 --- a/conf/types.go +++ /dev/null @@ -1,334 +0,0 @@ -package conf - -import ( - "fmt" - - "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/oracle/oci-go-sdk/v49/objectstorage" - "github.com/orme292/s3packer/logbot" - "github.com/rs/zerolog" -) - -/* -createProfile is used to write out a sample configuration profile. -It is based on readConfig and will only include required fields rather than hidden, optional, or unsupported fields. -*/ -type createProfile struct { - Version int `yaml:"Version"` - Provider string `yaml:"Provider"` - AWS struct { - Profile string `yaml:"Profile"` - Key string `yaml:"Key"` - Secret string `yaml:"Secret"` - ACL string `yaml:"ACL"` - Storage string `yaml:"Storage"` - } `yaml:"AWS"` - OCI struct { - Profile string `yaml:"Profile"` - Compartment string `yaml:"Compartment"` - Storage string `yaml:"Storage"` - } `yaml:"OCI"` - Akamai struct { - Key string `yaml:"Key"` - Secret string `yaml:"Secret"` - } `yaml:"Akamai"` - Bucket struct { - Create bool `yaml:"Create"` - Name string `yaml:"Name"` - Region string `yaml:"Region"` - } `yaml:"Bucket"` - Options struct { - MaxUploads int `yaml:"MaxUploads"` - Overwrite string `yaml:"Overwrite"` - } `yaml:"Options"` - Tagging struct { - ChecksumSHA256 bool `yaml:"Checksum"` - Origins bool `yaml:"Origins"` - Tags map[string]string `yaml:"Tags"` - } `yaml:"Tagging"` - Objects struct { - NamePrefix string `yaml:"NamePrefix"` - RootPrefix string `yaml:"RootPrefix"` - Naming string `yaml:"Naming"` - OmitOriginDirectory bool `yaml:"OmitRootDir"` - } `yaml:"Objects"` - Logging struct { - Level int `yaml:"Level"` - Console bool `yaml:"Console"` - File bool `yaml:"File"` - Filepath string `yaml:"Filepath"` - } `yaml:"Logging"` - Uploads struct { - Files []string `yaml:"Files"` - Directories []string `yaml:"Directories"` - } `yaml:"Uploads"` -} - -/* -readConfig is only used to unmarshal a YAML profile, it is not used in the application. -*/ -type readConfig struct { - // Version will be used for feature support - Version int `yaml:"Version"` - Provider string `yaml:"Provider"` - - // AWS will contain only AWS specific configuration details. Other providers will have their own - // struct and fields. 
- AWS struct { - Profile string `yaml:"Profile"` - Key string `yaml:"Key"` - Secret string `yaml:"Secret"` - ACL string `yaml:"ACL"` - Storage string `yaml:"Storage"` - } `yaml:"AWS"` - - // OCI will contain only OCI specific configuration details. Other providers will have their own - OCI struct { - Profile string `yaml:"Profile"` - Compartment string `yaml:"Compartment"` - Storage string `yaml:"Storage"` - } `yaml:"OCI"` - - // Akamai will contain only Linode and Akamai specific configuration details. - Akamai struct { - Key string `yaml:"Key"` - Secret string `yaml:"Secret"` - } `yaml:"Akamai"` - - // Bucket should be universal across all providers, though there may be different fields depending on the - // provider. - Bucket struct { - Create bool `yaml:"Create"` - Name string `yaml:"Name"` - Region string `yaml:"Region"` - } `yaml:"Bucket"` - - // The Objects struct contains object level configuration details, mostly related to object naming - // Note: Object tags will be handled in the Tagging struct - Objects struct { - NamePrefix string `yaml:"NamePrefix"` - RootPrefix string `yaml:"RootPrefix"` - Naming string `yaml:"Naming"` - OmitRootDir bool `yaml:"OmitRootDir"` - } `yaml:"Objects"` - - // The Options struct is used to configure the application and how it operates. - Options struct { - MaxParts int `yaml:"MaxParts"` - MaxUploads int `yaml:"MaxUploads"` - Overwrite string `yaml:"Overwrite"` - } `yaml:"Options"` - - // Tagging is used only for object tagging. - Tagging struct { - ChecksumSHA256 bool `yaml:"ChecksumSHA256"` - Origins bool `yaml:"Origins"` - Tags map[string]string `yaml:"Tags"` - } `yaml:"Tagging"` - - // The Uploads struct contains the list of files, folders, and directories to upload. - // Folders and Directories will be merged. - Uploads struct { - Files []string `yaml:"Files"` - Folders []string `yaml:"Folders"` - Directories []string `yaml:"Directories"` - } `yaml:"Uploads"` - - // Logging is used to configure the logging output, which is handled by the 'logbot' package. - Logging struct { - Level int `yaml:"Level"` - Console bool `yaml:"Console"` - File bool `yaml:"File"` - Filepath string `yaml:"Filepath"` - } `yaml:"Logging"` - - // Log is an instance of the logger. - Log *logbot.LogBot -} - -// Provider represents the configuration for a provider. -// -// Fields: -// - Is (ProviderName): The name of the provider. (e.g., "AWS", "OCI") -// - AWS (*ProviderAWS): The configuration for AWS. -// - OCI (*ProviderOCI): The configuration for OCI. -// - Key (string): The provider key. -// - Secret (string): The provider secret. -// -// Usage examples can be found in the surrounding code. -type Provider struct { - Is ProviderName - AWS *ProviderAWS - OCI *ProviderOCI - Akamai *ProviderAkamai - Key string - Secret string -} - -// ProviderAWS represents the AWS provider configuration. -// -// Fields: -// - Profile: The profile name used for authentication. -// - ACL: The access control list for the storage objects. -// - Storage: The storage class for the objects. -// - Key: The AWS access key ID. -// - Secret: The AWS secret access key. -type ProviderAWS struct { - Profile string - ACL types.ObjectCannedACL - Storage types.StorageClass - Key string - Secret string -} - -// ProviderOCI represents the OCI provider configuration. -type ProviderOCI struct { - Profile string - Compartment string - Storage objectstorage.StorageTierEnum - - // this is ridiculous, but the OCI SDK has a separate enum for PUT requests. 
- PutStorage objectstorage.PutObjectStorageTierEnum -} - -// ProviderAkamai represents the Linode/Akamai provider configuration -type ProviderAkamai struct { - Key string - Secret string - Endpoint string -} - -// Bucket contains all details related to the bucket, for any provider. Create is not implemented. -type Bucket struct { - Create bool - Name string - Region string -} - -// Objects contain the object naming configuration. -type Objects struct { - NamePrefix string - RootPrefix string - Naming Naming - - // OmitRootDir is used to remove the root directory name from the object's final FormattedKey. - OmitRootDir bool -} - -// Opts contains application level configuration options. -type Opts struct { - MaxParts int - MaxUploads int - Overwrite Overwrite -} - -// TagOpts contain the object tagging configuration, but only the ones handled internally by the application. -// Custom tags are put in a separate map named "Tags" inside the AppConfig struct. -type TagOpts struct { - ChecksumSHA256 bool - AwsChecksumAlgorithm types.ChecksumAlgorithm - AwsChecksumMode types.ChecksumMode - Origins bool -} - -// LogOpts contains the logging configuration, but not an instance of logbot. -type LogOpts struct { - Level zerolog.Level - Console bool - File bool - Filepath string -} - -// AppConfig is an application level struct that all profile configuration details are loaded into. -// Log is an instance zerolog.Logger, which is built in logbot. -// The Files and Directories structs are the list of files and directories to upload. -type AppConfig struct { - Provider *Provider - Bucket *Bucket - Objects *Objects - Opts *Opts - Tags map[string]string - Tag *TagOpts - LogOpts *LogOpts - Log *logbot.LogBot - Files []string - Directories []string -} - -// ProviderName type is a string enum of the supported providers, meant to make it easier to check -// AppConfig.Provider.Is when figuring out which provider config fields should be used. -// ProviderName.String() will return the string representation of the enum for convenience, either in output or logging. -type ProviderName string - -const ( - ProviderNameNone ProviderName = "none" - ProviderNameAWS ProviderName = "aws" - ProviderNameOCI ProviderName = "oci" - ProviderNameAkamai ProviderName = "akamai" -) - -func (p ProviderName) String() string { - return string(p) -} - -// Overwrite type is a string enum of the supported overwrite methods. OverwriteChecksum is not implemented. -// Overwrite.String() will return the string representation of the enum for convenience, either in output or logging. -type Overwrite string - -const ( - OverwriteChecksum Overwrite = "checksum" - OverwriteNever Overwrite = "never" - OverwriteAlways Overwrite = "always" -) - -func (o Overwrite) String() string { - return string(o) -} - -// Naming type is a string enum of the supported object naming methods. -type Naming string - -const ( - NamingRelative Naming = "relative" - NamingAbsolute Naming = "absolute" -) - -// String returns the string representation of the Naming object. -// It converts the Naming object to a string by using the underlying string value. -func (n Naming) String() string { - return string(n) -} - -// S is a shortcut for fmt.Sprintf. The only real purpose is to reduce clutter and line lengths. -func S(format string, a ...any) string { - return fmt.Sprintf(format, a...) 
-} - -const ( - Empty = "" -) - -// Errors - -const ( - InvalidNamingMethod = "invalid object naming method" - InvalidStorageClass = "invalid storage class" - InvalidOverwriteMethod = "invalid overwrite method" - InvalidTagChars = "invalid characters removed from tag" - - ErrorProfilePath = "error determining profile path" - ErrorOpeningProfile = "error opening profile" - ErrorReadingYaml = "error reading yaml" - - ErrorLoggingFilepathNotSpecified = "path to log file not specified" - ErrorLoggingFilepath = "error determining log file path" - ErrorLoggingLevelTooHigh = "logging level too high, setting to 5" - ErrorLoggingLevelTooLow = "logging level too low, setting to 0" - ErrorGettingFileInfo = "error getting file info" - ErrorFileIsDirectory = "listed file is a directory" - ErrorNoFilesSpecified = "no files, folders, directories specified" - ErrorNoReadableFiles = "no readable files or directories specified" - ErrorUnsupportedProfileVersion = "profile version not supported" - ErrorProviderNotSpecified = "provider not specified" - ErrorBucketNotSpecified = "bucket or region not specified" -) diff --git a/conf/validate.go b/conf/validate.go deleted file mode 100644 index fc214fa..0000000 --- a/conf/validate.go +++ /dev/null @@ -1,76 +0,0 @@ -package conf - -import ( - "errors" -) - -// validateFiles() checks to make sure that at least one file or directory is specified. If not, then an error -// is returned. -func (rc *readConfig) validateFiles() (err error) { - if len(rc.Uploads.Files) == 0 && len(rc.Uploads.Folders) == 0 && len(rc.Uploads.Directories) == 0 { - err = errors.New(ErrorNoFilesSpecified) - } - return -} - -// validateLogging() checks to make sure that if logging to a file is enabled, then a path is specified. If not, -// then an error is returned. Whether the actual file is accessible or not is not checked. -func (rc *readConfig) validateLogging() (err error) { - if rc.Logging.File == true && rc.Logging.Filepath == Empty { - err = errors.New(ErrorLoggingFilepathNotSpecified) - rc.Logging.File = false - } - if rc.Logging.Level > 5 { - rc.Logging.Level = 5 - err = errors.New(ErrorLoggingLevelTooHigh) - } - if rc.Logging.Level < -1 { - rc.Logging.Level = -1 - err = errors.New(ErrorLoggingLevelTooLow) - } - return -} - -// validateProviderAWS() checks that the AWS checks to see if the profile is empty or the key/secret are empty. If -// the profile is empty and one or both of the key/secret are empty, then an error is returned. -func (rc *readConfig) validateProviderAWS() (err error) { - if rc.AWS.Profile != Empty && (rc.AWS.Key != Empty || rc.AWS.Secret != Empty) { - err = errors.New(ErrorAWSProfileAndKeys) - } - if (rc.AWS.Key == Empty && rc.AWS.Secret != Empty) || (rc.AWS.Key != Empty && rc.AWS.Secret == Empty) { - err = errors.New(ErrorAWSKeyOrSecretNotSpecified) - } - return -} - -// validateProviderAkamai() checks to see if the Linode/Akamai Key or Secret are empty, if they are then an error -// is returned. -func (rc *readConfig) validateProviderAkamai() (err error) { - if rc.Akamai.Secret == Empty || rc.Akamai.Key == Empty { - return errors.New(ErrorAkamaiKeyOrSecretNotSpecified) - } - return -} - -// validateProviderOCI() checks to see if the OCI profile is empty. If it is, then an error is returned. -// The compartment can be empty but -func (rc *readConfig) validateProviderOCI() (err error) { - if rc.OCI.Profile == Empty { - return errors.New(ErrorOCIAuthNotSpecified) - } - // This isn't fatal. The provider will just retrieve the tenancy root and use that. 
-	if rc.OCI.Compartment == Empty {
-		rc.Log.Warn(ErrorOCICompartmentNotSpecified)
-	}
-	return nil
-}
-
-// validateVersion() checks that the profile is at version 4; otherwise an error is returned.
-// If there are future versions of the profile, then this method will be fleshed out.
-// For now, there's only support for version 4
-func (rc *readConfig) validateVersion() (v int, err error) {
-	if rc.Version < 4 || rc.Version > 4 {
-		return rc.Version, errors.New(ErrorUnsupportedProfileVersion)
-	}
-	return rc.Version, nil
-}
diff --git a/docs/README_AKAMAI.md b/docs/README_AKAMAI.md
index 1db9e15..2079e16 100644
--- a/docs/README_AKAMAI.md
+++ b/docs/README_AKAMAI.md
@@ -19,28 +19,41 @@ $ s3packer --create="my-new-akamai-profile.yaml"
 
 ## Setting up a Profile
 
-s3packer profiles are written in the YAML format. To set one up, you just need to fill out a few fields, and you’ll be good to go!
+s3packer profiles are written in YAML. To set one up, you just need to fill out a few fields, and you’ll be good to go!
 
-First, make sure you specify that you're using Version 4 of the profile format and specify `Akamai` as the object storage provider:
+First, make sure you specify that you're using Version 5 of the profile format:
 
 ```yaml
----
-Version: 4
-Provider: Akamai
+Version: 5
+```
+
+Be sure to specify a provider:
+
+```yaml
+Version: 5
+Provider:
+  Use: Linode
 ```
 
 ---
 
 ## Authentication
 
-> 💡 Reminder
-> You can and should remove any provider fields that you're not using, like AWS and OCI.
 
-**s3packer** uses access keys to authenticate with Akamai. You can find and generate access keys in the Cloud Manager.
-For info on generating new access keys, check out the [Akamai Object Storage Guide][akamai_auth_url].
+**s3packer** uses object storage access keys to authenticate with Linode. You can find and generate access keys in the
+Cloud Manager. For info on generating new access keys, check out the [Linode Object Storage Guide][akamai_auth_url].
+
+```yaml
+Version: 5
+Provider:
+  Use: Linode
+  Key: "zzzyyyyxxxxx1111222"
+  Secret: "aabbbcccddddeeeffff999988888"
+```
+
+Configure the `Region`; s3packer uses it to build the Linode object storage endpoint name.
 
 ```yaml
-Akamai:
-  Key: zzzyyyyxxxxx1111222
-  Secret: aabbbcccddddeeeffff999988888
+Linode:
+  Region: se-sto-1
 ```
 
 Next, configure the bucket. The `name` and `region` fields are required. If the `region` field isn't correct,
@@ -48,9 +61,9 @@ s3packer won't find the bucket and (if configured to) will create a new one in t
 
 `Create` defaults to `false`. If `true`, s3packer will create the bucket in the specified region if it doesn't exist.
 
-`Region` should contain the region short-code. When you create a bucket in the Cloud Manager, the short code will be
-listed in the region dropdown. You can also check Akamai's documentation for a list of region short-codes:
-[Akamai Region List][akamai_region_list_url].
+`Region` should contain the region short-code. When you create a bucket in the Cloud Manager, the short code will be
+listed in the region dropdown. You can also check Linode's documentation for a list of region short-codes:
+[Linode Region List][akamai_region_list_url].
 
 ```yaml
 Bucket:
@@ -59,70 +72,78 @@ Bucket:
   Region: "se-sto-1"
 ```
 
-Finally, tell s3packer what you want to upload. You can specify folders, directories, or individual files. (You can call
-it the `Folders` section or the `Directories` section, it doesn't matter.)
+And then, tell s3packer what you want to upload. You can specify directories or individual files. When you specify a
+directory, s3packer will traverse all subdirectories.
 
 ```yaml
-Uploads:
-  Folders:
-    - "/Users/forrest/docs/stocks/apple"
-    - "/Users/jenny/docs/song_lyrics"
-  Files:
-    - "/Users/forrest/docs/job-application-lawn-mower.pdf"
-    - "/Users/forrest/docs/dr-pepper-recipe.txt"
-    - "/Users/jenny/letters/from-forrest.docx"
+Dirs:
+  - "/Users/forrest/docs/stocks/apple"
+  - "/Users/jenny/docs/song_lyrics"
+Files:
+  - "/Users/forrest/docs/job-application-lawn-mower.pdf"
+  - "/Users/forrest/docs/dr-pepper-recipe.txt"
+  - "/Users/jenny/letters/from-forrest.docx"
 ```
 
 ---
 
 ### Tags
 
-Unfortunately, tags are not supported by Akamai Object Storage.
+Unfortunately, tags are not supported by Linode Object Storage.
 
-### Extra Options
+---
+
+### Linode Specific Options
 
-Akamai Object Storage does support AWS-like ACLs, but these aren't supported by s3packer yet. All uploads will
-be set to private by default.

-Storage tiers are _not_ supported by Akamai Object Storage. +Unfortunately, s3packer does not support assigning ACLs to Linode objects. Linode does not support storage tiers. --- -### Object Naming Options +### Extra Options + +You can customize how your files are named and uploaded to Linode object storage. + +--- ```yaml Objects: + NamingType: "relative" NamePrefix: "monthly-" - RootPrefix: "/backups/monthly" - Naming: "relative" + PathPrefix: "/backups/monthly" ``` +**NamingType**
+The default is `relative`.
+
+- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files
+  specified in the profile will always end up at the root of the bucket, plus the `PathPrefix` and then the `NamePrefix`).
+- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem.
+
 **NamePrefix**
This is blank by default. Any value you put here will be added before the filename when it's uploaded to S3. Using something like `weekly-` will add that string to any file you're uploading, like `weekly-log.log` or `weekly-2021-01-01.log`. -**RootPrefix**
+**PathPrefix**
This is blank by default. Any value put here will be added before the file path when it's uploaded to S3. If you use something like `/backups/monthly`, the file will be uploaded to `/backups/monthly/your-file.txt`. -**Naming**
-The default is `relative`.
-- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files specified in the profile will always end up at the root of the bucket, plus the `pathPrefix` and then `objectPrefix`).
-- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem.
-
 ---
 
 ```yaml
 Options:
-  MaxUploads: 100
-  Overwrite: "never"
+  MaxParts: 10
+  MaxUploads: 5
+  OverwriteObjects: "never"
 ```
 
+**MaxParts**
The default depends on the provider. The AWS default is `100`. MaxParts sets the number of pieces a large file is
+split into for a multipart upload; the pieces are reassembled after the upload completes.
+
 **MaxUploads**
The default is `5`. This is the maximum number of files that will be uploaded at the same time. Concurrency is at the directory level, so the biggest speed gains are seen when uploading a directory with many files. -**Overwrite**
-This is `never` by default. If you set it to `always`, s3packer will Overwrite any files in the bucket that +**OverwriteObjects**
+This is `never` by default. If you set it to `always`, s3packer will overwrite any files in the bucket that have the same name as what you're uploading. Useful if you're uploading a file that is updated over and over again. --- @@ -134,25 +155,25 @@ And if you like keeping track of things or want a paper trail, you can set up lo ```yaml Logging: Level: 1 - Console: true - File: true - Filepath: "/var/log/backup.log" + OutputToConsole: true + OutputToFile: true + Path: "/var/log/backup.log" ``` **Level:**
This is `2` by default. The setting is by severity, with 0 being least severe and 5 being most severe. 0 will log all messages (including debug), and 5 will only log fatal messages which cause the program to exit. -**Console:**
+**OutputToConsole:**
This is `true` by default. Outputs logging messages to standard output. If you set it to `false`, s3packer prints minimal output. -**File:**
-This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to -a file. You MUST also specify a `filename`. +**OutputToFile:**
+This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to a file. You +MUST also specify a `Path`. -**Filepath:**
-File to write structured log messages to. If you set `toFile` to `true`, you must specify a filename. +**Path:**
Path of the file to write structured log messages to. If you set `OutputToFile` to `true`, you must specify a path.
 The file will be created if it doesn't exist, and appended to if it does.
 
 ---
 
@@ -171,9 +192,10 @@ directories with a large number of files can take some time as the checksums are
 
 ---
 
-### Issues
+### Issues & Suggestions
 
-And if you run into any issues or have any suggestions, feel free to open a new issue on [GitHub][issue_repo_url].
+If you run into any problems or errors, or have feature suggestions, please feel free to open a new issue on
+[GitHub][issue_repo_url].
 
 ---
 
diff --git a/docs/README_OCI.md b/docs/README_OCI.md
index b070b7c..4a1e630 100644
--- a/docs/README_OCI.md
+++ b/docs/README_OCI.md
@@ -19,30 +19,36 @@ $ s3packer --create="my-new-oci-profile.yaml"
 
 ## Setting up a Profile
 
-s3packer profiles are written in the YAML format. To set one up, you just need to fill out a few fields, and you’ll be good to go!
+s3packer profiles are written in YAML. To set one up, you just need to fill out a few fields, and you’ll be good to go!
 
-First, make sure you specify that you're using Version 4 of the profile format and specify OCI as the object storage provider:
+First, make sure you specify that you're using Version 5 of the profile format:
 
 ```yaml
-Version: 4
-Provider: oci
+Version: 5
+Provider:
+  Use: oracle
 ```
 
 ---
 
 ## Authentication
 
-> 💡 You can remove the **AWS** section from the profile.
-**s3packer** handles OCI authentication that is generated with the OCI-CLI. You can specify a profile that's already set
-up in your `~/.oci/config` file.
+**s3packer** handles OCI authentication using the config generated with the OCI-CLI. You can specify a profile that's
+already set up in your `~/.oci/config` file.
 
 For info on setting up the OCI-CLI, check out the [Oracle Cloud documentation][oci_cli_url].
 
-The compartment field can be left blank. It is only required if s3packer has to create a bucket. If s3packer is creating the bucket,
-and no compartment is specified, it will use the tenancy root as the compartment.
+```yaml
+Version: 5
+Provider:
+  Use: oracle
+  Profile: DEFAULT
+```
+
+Under the OCI field, specify a compartment. It is only required if s3packer has to create a bucket. If s3packer is
+creating the bucket and no compartment is specified, it will create the bucket in the tenancy's root compartment.
 
 ```yaml
 OCI:
-  Profile: "default"
   Compartment: "ocid1.compartment.oc1..aaaaaaa..."
 ```
 
@@ -52,43 +58,41 @@ created in the tenancy's default region.
 
 ```yaml
 Bucket:
+  Create: true
   Name: "free-data"
   Region: "eu-zurich-1"
 ```
 
-Finally, tell s3packer what you want to upload. You can specify folders, directories, or individual files. (You can call
-it the `Folders` section or the `Directories` section, it doesn't matter.)
+Finally, tell s3packer what you want to upload. You can specify directories or individual files. When you specify a
+directory, s3packer will traverse all subdirectories.
 
 ```yaml
-Uploads:
-  Folders:
-    - "/Users/forrest/docs/stocks/apple"
-    - "/Users/jenny/docs/song_lyrics"
-  Files:
-    - "/Users/forrest/docs/job-application-lawn-mower.pdf"
-    - "/Users/forrest/docs/dr-pepper-recipe.txt"
-    - "/Users/jenny/letters/from-forrest.docx"
+Dirs:
+  - "/Users/forrest/docs/stocks/apple"
+  - "/Users/jenny/docs/song_lyrics"
+Files:
+  - "/Users/forrest/docs/job-application-lawn-mower.pdf"
+  - "/Users/forrest/docs/dr-pepper-recipe.txt"
+  - "/Users/jenny/letters/from-forrest.docx"
 ```
 
 ---
 
 ### Tags
 
-You can also add tags to your files. 
Just add a `Tagging` section to your profile, like this:
+You can also add tags to your files. Just add a `Tags` section to your profile:
 
 ```yaml
-Tagging:
-  Tags:
-    Author: "Forrest Gump"
-    Year: 1994
+Tags:
+  Author: "Forrest Gump"
+  Year: "1994"
 ```
 
 ---
 
-### Extra Options
+### OCI Specific Options
 
-You can also customize how your files are stored, accessed, tagged, and uploaded using these options.
+Configure your object storage tier.
 
----
 ```yaml
 OCI:
   Storage: "standard"
@@ -104,57 +108,69 @@ Read more about OCI's storage tiers here: [https://docs.oracle.com/en-us/iaas/Co
 
 ---
 
+### Extra Options
+
+You can also customize how your files are stored, accessed, tagged, and uploaded using these options.
+
+---
+
 ```yaml
 Objects:
+  NamingType: "relative"
  NamePrefix: "monthly-"
-  RootPrefix: "/backups/monthly"
-  Naming: "relative"
+  PathPrefix: "/backups/monthly"
 ```
 
+**NamingType**
+The default is `relative`.
+
+- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files
+  specified in the profile will always end up at the root of the bucket, plus the `PathPrefix` and then the `NamePrefix`).
+- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem.
+
 **NamePrefix**
This is blank by default. Any value you put here will be added before the filename when it's uploaded to S3. Using something like `weekly-` will add that string to any file you're uploading, like `weekly-log.log` or `weekly-2021-01-01.log`. -**RootPrefix**
+**PathPrefix**
This is blank by default. Any value put here will be added before the file path when it's uploaded to S3. If you use something like `/backups/monthly`, the file will be uploaded to `/backups/monthly/your-file.txt`. -**Naming**
-The default is `relative`.
-- `relative`: The key will be prepended with the relative path of the file on the local filesystem (individual files specified in the profile will always end up at the root of the bucket, plus the `pathPrefix` and then `objectPrefix`).
-- `absolute`: The key will be prepended with the absolute path of the file on the local filesystem.
-
 ---
 
 ```yaml
 Options:
-  MaxUploads: 100
-  Overwrite: "never"
+  MaxParts: 10
+  MaxUploads: 5
+  OverwriteObjects: "never"
 ```
 
+**MaxParts**
The default depends on the provider. The AWS default is `100`. MaxParts sets the number of pieces a large file is
+split into for a multipart upload; the pieces are reassembled after the upload completes.
+
 **MaxUploads**
The default is `5`. This is the maximum number of files that will be uploaded at the same time. Concurrency is at the directory level, so the biggest speed gains are seen when uploading a directory with many files. -**Overwrite**
-This is `never` by default. If you set it to `always`, s3packer will Overwrite any files in the bucket that +**OverwriteObjects**
+This is `never` by default. If you set it to `always`, s3packer will overwrite any files in the bucket that have the same name as what you're uploading. Useful if you're uploading a file that is updated over and over again. --- ```yaml Tagging: + OriginPath: true ChecksumSHA256: false - Origins: true ``` -**ChecksumSHA256**
-This is `true` by default. Every object uploaded will be tagged with the file's calculated SHA256 checksum. It'll -be used to verify file changes in the future. Whether this is `true` or `false`, the SHA256 checksum will still be -calculated and used to verify the integrity of the file after it's uploaded. -**Origins**
-This is `true` by default. Every object uploaded will be tagged with the full absolute path of the file on the -local filesystem. This is useful if you want to be able to trace the origin of a file in S3. +**OriginPath**
+This is `true` by default. Every object uploaded will be tagged with the full absolute path of the file on the local +filesystem. This is useful if you want to be able to trace the origin of a file in S3. The tag name will be +`s3packer-origin-path`. Oracle may add a prefix to the tag name. + +**ChecksumSHA256**
+This is `true` by default. Every object uploaded will be tagged with the file's calculated SHA256 checksum. The tag name +will be `s3packer-checksum-sha256`. Oracle may add a prefix to the tag name. --- @@ -165,25 +181,25 @@ And if you like keeping track of things or want a paper trail, you can set up lo ```yaml Logging: Level: 1 - Console: true - File: true - Filepath: "/var/log/backup.log" + OutputToConsole: true + OutputToFile: true + Path: "/var/log/backup.log" ``` **Level:**
This is `2` by default. The setting is by severity, with 0 being least severe and 5 being most severe. 0 will log all messages (including debug), and 5 will only log fatal messages which cause the program to exit. -**Console:**
+**OutputToConsole:**
This is `true` by default. Outputs logging messages to standard output. If you set it to `false`, s3packer prints minimal output. -**File:**
-This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to -a file. You MUST also specify a `filename`. +**OutputToFile:**
+This is `false` by default. If you set it to `true`, s3packer will write structured log (JSON) messages to a file. You +MUST also specify a `Path`. -**Filepath:**
-File to write structured log messages to. If you set `toFile` to `true`, you must specify a filename. +**Path:**
Path of the file to write structured log messages to. If you set `OutputToFile` to `true`, you must specify a path.
 The file will be created if it doesn't exist, and appended to if it does.
 
 ---
 
@@ -202,9 +218,10 @@ directories with a large number of files can take some time as the checksums are
 
 ---
 
-### Issues
+### Issues & Suggestions
 
-And if you run into any issues or have any suggestions, feel free to open a new issue on [GitHub][issue_repo_url].
+If you run into any problems or errors, or have feature suggestions, please feel free to open a new issue on
+[GitHub][issue_repo_url].
 
 ---
 
diff --git a/logbot/types.go b/logbot/types.go
index 6302fd9..f38353a 100644
--- a/logbot/types.go
+++ b/logbot/types.go
@@ -38,7 +38,7 @@ func ParseIntLevel(n any) zerolog.Level {
 		}
 		n = x
 	case bool:
-		return DEBUG
+		return ERROR
 	case int:
 		n = v
 	default:
diff --git a/main.go b/main.go
index 8ce1170..64542fd 100644
--- a/main.go
+++ b/main.go
@@ -3,6 +3,7 @@ package main
 import (
 	"errors"
 	"fmt"
+	"log"
 	"os"
 
 	pal "github.com/abusomani/go-palette/palette"
@@ -40,7 +41,8 @@ getFlags uses the flag package to configure and get command line arguments. It r
 -- profile: The filename of the profile to load.
 */
 func getFlags() (profile, create string, err error) {
-	flag.StringVar(&profile, "profile", "", "The profile filename you want to use.")
+
+	flag.StringVar(&profile, "profile", "", "The filename of the profile you want to open.")
 	flag.StringVar(&create, "create", "", "Create a new profile with the specified filename.")
 	flag.Parse()
 
@@ -52,6 +54,7 @@ func getFlags() (profile, create string, err error) {
 		err = errors.New("use either --create or --profile, not both")
 	}
 	return
+
 }
 
 /*
@@ -64,31 +67,35 @@ main is the entry point of the program. It does the following:
 5. Any returned errors from either of the above are printed as warnings and the program terminates with a 0.
*/ func main() { + p := pal.New(pal.WithBackground(pal.Color(21)), pal.WithForeground(pal.BrightWhite), pal.WithSpecialEffects([]pal.Special{pal.Bold})) - _, _ = p.Println("s3packer v", s3packs.Version) + _, _ = p.Println("s3packer ", s3packs.Version) p.SetOptions(pal.WithDefaults(), pal.WithForeground(pal.BrightWhite)) _, _ = p.Println("https://github.com/orme292/s3packer\n") - pFile, cFile, err := getFlags() + profile, create, err := getFlags() if err != nil { fmt.Println(err.Error()) os.Exit(1) } - if cFile != "" { - err = conf.Create(cFile) + if create != "" { + + builder := conf.NewBuilder(create) + err = builder.YamlOut() if err != nil { - fmt.Printf("An error occurred: %q\n\n", err.Error()) - os.Exit(1) - } else { - os.Exit(0) + log.Fatalf("Unable to write profile: %v", err) } + + log.Printf("File written: %s", create) + os.Exit(0) + } - ac, err := conf.NewAppConfig(pFile) + builder := conf.NewBuilder(profile) + ac, err := builder.FromYaml() if err != nil { - fmt.Println("An error occurred: ", err.Error()) - os.Exit(1) + log.Fatalf("Error loading profile: %v", err) } stats, errs := s3packs.Do(ac) diff --git a/profiles/example1.yaml b/profiles/example1.yaml deleted file mode 100644 index 95dbb80..0000000 --- a/profiles/example1.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -Version: 4 -Provider: aws -AWS: - Profile: default - ACL: private - Storage: standard -Bucket: - Create: false - Name: code-backups - Region: us-east-1 -Options: - MaxUploads: 10 - Overwrite: never -Tagging: - Checksum: true - Origins: true - Tags: - company: big data llc - project: code backups -Objects: - NamePrefix: "" - RootPrefix: "2024-Feb/" - Naming: relative - OmitRootDir: false -Logging: - Level: 1 - Console: true - File: true - Filepath: "/var/log/code-backups.log" -Uploads: - Files: - - file1.txt - - file2.txt - Directories: - - /home/me/dir1 - - /home/me/dir2 diff --git a/profiles/example2.yaml b/profiles/example2.yaml deleted file mode 100644 index 8cb21eb..0000000 --- a/profiles/example2.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -Version: 4 -Provider: oci -OCI: - Profile: "default" - Compartment: "ocid1.compartment.oc1..aaaaaaaaabbbbb..." 
- Storage: "InfrequentAccess" -Bucket: - Create: false - Name: code-backups - Region: us-ashburn-1 -Options: - MaxUploads: 10 - Overwrite: never -Tagging: - Checksum: true - Origins: true - Tags: - company: big data llc - project: code backups -Objects: - NamePrefix: "" - RootPrefix: "2024-Feb/" - Naming: relative - OmitRootDir: false -Logging: - Level: 1 - Console: true - File: true - Filepath: "/var/log/code-backups.log" -Uploads: - Files: - - file1.txt - - file2.txt - Directories: - - /home/me/dir1 - - /home/me/dir2 diff --git a/profiles/example3.yaml b/profiles/example3.yaml deleted file mode 100644 index 7f3af9b..0000000 --- a/profiles/example3.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -Version: 4 -Provider: Akamai -Akamai: - Key: zzzyyyyxxxxx1111222 - Secret: aabbbcccddddeeeffff999988888 -Bucket: - Create: true - Name: code-backups - Region: se-sto-1 -Options: - MaxUploads: 10 - Overwrite: never -Objects: - NamePrefix: "" - RootPrefix: "2024-Feb/" - Naming: relative - OmitRootDir: false -Logging: - Level: 1 - Console: true - File: true - Filepath: "/var/log/code-backups.log" -Uploads: - Files: - - file1.txt - - file2.txt - Directories: - - /home/me/dir1 - - /home/me/dir2 diff --git a/profiles/example_aws.yaml b/profiles/example_aws.yaml new file mode 100644 index 0000000..6666c63 --- /dev/null +++ b/profiles/example_aws.yaml @@ -0,0 +1,39 @@ +--- +Version: 5 +Provider: + Use: aws + Profile: myAwsProfile +AWS: + ACL: private + Storage: intelligent_tiering +Bucket: + Create: true + Name: MyBackupBucket + Region: us-lax-1 +Options: + MaxParts: 10 + MaxUploads: 5 + OverwriteObjects: never +Tagging: + OriginPath: true + ChecksumSHA256: true +Tags: + Author: Forrest Gump + Title: Letters to Jenny +Objects: + NamingType: relative + NamePrefix: backup- + PathPrefix: /backups/april/2023 + OmitRootDir: true +Logging: + Level: 4 + OutputToConsole: true + OutputToFile: true + Path: /var/log/s3packer.log +Files: + - /documents/to_jenny/letter_1.doc + - /documents/to_jenny/letter_2.doc + - /documents/to_jenny/letter_3.doc +Dirs: + - /documents/from_jenny + - /documents/stock_certificates diff --git a/profiles/example_linode.yaml b/profiles/example_linode.yaml new file mode 100644 index 0000000..191ea36 --- /dev/null +++ b/profiles/example_linode.yaml @@ -0,0 +1,39 @@ +--- +Version: 5 +Provider: + Use: Linode + Key: mykey + Secret: mysecret +Linode: + Region: se-sto-1 +Bucket: + Create: true + Name: "forrests-backup-bucket" + Region: se-sto-1 +Options: + MaxParts: 10 + MaxUploads: 5 + OverwriteObjects: never +Tagging: + OriginPath: true + ChecksumSHA256: true +Tags: + Author: Forrest Gump + Title: Letters to Jenny +Objects: + NamingType: relative + NamePrefix: backup- + PathPrefix: /backups/april/2023 + OmitRootDir: true +Logging: + Level: 4 + OutputToConsole: true + OutputToFile: true + Path: /var/log/s3packer.log +Files: + - /documents/to_jenny/letter_1.doc + - /documents/to_jenny/letter_2.doc + - /documents/to_jenny/letter_3.doc +Dirs: + - /documents/from_jenny + - /documents/stock_certificates diff --git a/profiles/example_oci.yaml b/profiles/example_oci.yaml new file mode 100644 index 0000000..cb35386 --- /dev/null +++ b/profiles/example_oci.yaml @@ -0,0 +1,39 @@ +--- +Version: 5 +Provider: + Use: oci + Profile: default +OCI: + Compartment: ocid1.compartment.oc1..aaaaaaaaa2qfwzyec6js1ua2ybtyyh3m39ze + Storage: standard +Bucket: + Create: true + Name: MyBackupBucket + Region: us-ashburn-1 +Options: + MaxParts: 10 + MaxUploads: 5 + OverwriteObjects: never +Tagging: + OriginPath: true + 
ChecksumSHA256: true +Tags: + Author: Forrest Gump + Title: Letters to Jenny +Objects: + NamingType: relative + NamePrefix: backup- + PathPrefix: /backups/april/2023 + OmitRootDir: true +Logging: + Level: 4 + OutputToConsole: true + OutputToFile: true + Path: /var/log/s3packer.log +Files: + - /documents/to_jenny/letter_1.doc + - /documents/to_jenny/letter_2.doc + - /documents/to_jenny/letter_3.doc +Dirs: + - /documents/from_jenny + - /documents/stock_certificates diff --git a/s3packs/main.go b/s3packs/main.go index 8bee6d4..3ff48a0 100644 --- a/s3packs/main.go +++ b/s3packs/main.go @@ -20,8 +20,14 @@ func Do(ac *conf.AppConfig) (stats *objectify.Stats, errs provider.Errs) { p, err := provider.NewProcessor(ac, ops, fn) if err != nil { errs.Add(err) + return + } + + if p != nil { + errs = p.Run() + } else { + ac.Log.Fatal("Processor is empty.") } - errs = p.Run() return p.Stats, errs } @@ -39,7 +45,7 @@ func getProvider(ac *conf.AppConfig) (ops provider.Operator, fn provider.Iterato return nil, nil, err } return ops, pack_oci.OracleIteratorFunc, nil - case conf.ProviderNameAkamai: + case conf.ProviderNameLinode: ops, err = pack_akamai.NewAkamaiOperator(ac) if err != nil { return nil, nil, err diff --git a/s3packs/objectify/file_obj.go b/s3packs/objectify/file_obj.go index 6ea168a..802cb93 100644 --- a/s3packs/objectify/file_obj.go +++ b/s3packs/objectify/file_obj.go @@ -52,14 +52,14 @@ func NewFileObj(ac *conf.AppConfig, p, rel string, grp int) (fo *FileObj, err er if err != nil { fo.setIgnore(s("could not get file size: %q", err)) } - if ac.Tag.ChecksumSHA256 { + if ac.TagOpts.ChecksumSHA256 { fo.ChecksumSHA256, err = GetChecksumSHA256(fo.AbsPath) if err != nil { fo.setIgnore(s("could not get checksum: %q", err)) } fo.addTag("ChecksumSHA256", fo.ChecksumSHA256) } - if ac.Tag.Origins { + if ac.TagOpts.OriginPath { fo.addTag("OriginPath", fo.OriginPath) } for k, v := range ac.Tags { diff --git a/s3packs/objectify/helpers.go b/s3packs/objectify/helpers.go index e595b86..1e4b159 100644 --- a/s3packs/objectify/helpers.go +++ b/s3packs/objectify/helpers.go @@ -135,12 +135,12 @@ func formatFullKey(ac *conf.AppConfig, base, od, rr string) (fName, fPseudo stri if rr != EmptyString { fPseudo = formatPseudoPath(ac, od, rr) } - switch ac.Objects.Naming { + switch ac.Objects.NamingType { case conf.NamingRelative: - fPseudo = s("%s/%s", ac.Objects.RootPrefix, fPseudo) + fPseudo = s("%s/%s", ac.Objects.PathPrefix, fPseudo) fPseudo = stripSafePath(fPseudo) default: - fPseudo = s("%s/%s", ac.Objects.RootPrefix, strings.TrimPrefix(od, "/")) + fPseudo = s("%s/%s", ac.Objects.PathPrefix, strings.TrimPrefix(od, "/")) } return fName, fPseudo } diff --git a/s3packs/pack_akamai/helpers.go b/s3packs/pack_akamai/helpers.go index 67efa84..abf3b56 100644 --- a/s3packs/pack_akamai/helpers.go +++ b/s3packs/pack_akamai/helpers.go @@ -22,15 +22,18 @@ func buildUploader(ac *conf.AppConfig) (uploader *manager.Uploader, client *s3.C } func buildClient(ac *conf.AppConfig) (client *s3.Client, err error) { - creds := credentials.NewStaticCredentialsProvider(ac.Provider.Akamai.Key, - ac.Provider.Akamai.Secret, "") - cfg, err := config.LoadDefaultConfig(context.Background(), config.WithCredentialsProvider(creds)) + creds := credentials.NewStaticCredentialsProvider(ac.Provider.Linode.Key, + ac.Provider.Linode.Secret, "") + cfg, err := config.LoadDefaultConfig(context.Background(), + config.WithCredentialsProvider(creds), + config.WithRegion(ac.Bucket.Region), + ) if err != nil { return nil, err } client = s3.NewFromConfig(cfg, 
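+		// Linode Object Storage is S3-compatible; the AWS SDK client is pointed at the Linode endpoint instead of AWS.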
func(o *s3.Options) { - o.BaseEndpoint = aws.String(s("https://%s", ac.Provider.Akamai.Endpoint)) + o.BaseEndpoint = aws.String(s("https://%s", ac.Provider.Linode.Endpoint)) }) return client, nil diff --git a/s3packs/pack_akamai/iterator.go b/s3packs/pack_akamai/iterator.go index 1a53c69..111fba5 100644 --- a/s3packs/pack_akamai/iterator.go +++ b/s3packs/pack_akamai/iterator.go @@ -17,7 +17,7 @@ func AkamaiIteratorFunc(ac *conf.AppConfig, fol objectify.FileObjList, grp int) func NewIterator(ac *conf.AppConfig, list objectify.FileObjList, grp int) (iter *AkamaiIterator, err error) { return &AkamaiIterator{ provider: &conf.Provider{ - Is: conf.ProviderNameAkamai, + Is: conf.ProviderNameLinode, }, fol: list, group: grp, diff --git a/s3packs/pack_akamai/operator.go b/s3packs/pack_akamai/operator.go index 4e4e221..431ec39 100644 --- a/s3packs/pack_akamai/operator.go +++ b/s3packs/pack_akamai/operator.go @@ -32,7 +32,7 @@ func (op *AkamaiOperator) CreateBucket() (err error) { } _, err = op.client.CreateBucket(context.Background(), input) if err != nil { - op.ac.Log.Error("Unable to create bucket %q: %q", op.ac.Bucket.Name, err.Error()) + op.ac.Log.Fatal("Unable to create bucket %q in %q: %q", op.ac.Bucket.Name, op.ac.Bucket.Region, err.Error()) return err } op.ac.Log.Info("Created bucket %q in %q", op.ac.Bucket.Name, op.ac.Bucket.Region) diff --git a/s3packs/provider/processor.go b/s3packs/provider/processor.go index b6af6c4..70f5470 100644 --- a/s3packs/provider/processor.go +++ b/s3packs/provider/processor.go @@ -14,8 +14,8 @@ func NewProcessor(ac *conf.AppConfig, ops Operator, iterFn IteratorFunc) (p *Pro ) fmt.Printf("Starting Processor...\n") - if len(ac.Directories) > 0 { - rl, err = objectify.NewRootList(ac, ac.Directories) + if len(ac.Dirs) > 0 { + rl, err = objectify.NewRootList(ac, ac.Dirs) if err != nil { return nil, err } diff --git a/s3packs/types.go b/s3packs/types.go index 11ae080..0bfb70b 100644 --- a/s3packs/types.go +++ b/s3packs/types.go @@ -1,5 +1,5 @@ package s3packs const ( - Version = "v1.3.4" + Version = "v1.4.0" )
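
A minimal sketch of how the rebuilt conf package is meant to be consumed, pieced together from the main.go and s3packs/main.go hunks above; the profile path and the discarded return values are illustrative only and not part of this patch:

```go
package main

import (
	"log"

	"github.com/orme292/s3packer/conf"
	"github.com/orme292/s3packer/s3packs"
)

func main() {
	// Build an AppConfig from a YAML profile with the new builder API
	// (conf.NewBuilder / FromYaml replace the old conf.NewAppConfig(path) call).
	builder := conf.NewBuilder("profiles/example_aws.yaml")
	ac, err := builder.FromYaml()
	if err != nil {
		log.Fatalf("Error loading profile: %v", err)
	}

	// s3packs.Do selects the provider from ac.Provider.Is (aws, oci, or
	// linode) and uploads everything listed under Files and Dirs.
	stats, errs := s3packs.Do(ac)
	_ = stats // upload statistics returned by the run
	_ = errs  // any errors collected by the provider
}
```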