From 1a10bb6dafead5928198533748dae8a2a223a34b Mon Sep 17 00:00:00 2001 From: "F." Date: Mon, 16 Dec 2024 00:37:34 +0100 Subject: [PATCH] (feat) added config | logger | repository | secrets --- .env-example | 2 + .trunk/.gitignore | 9 + .trunk/configs/.hadolint.yaml | 4 + .trunk/configs/.markdownlint.yaml | 2 + .trunk/configs/.shellcheckrc | 7 + .trunk/configs/svgo.config.js | 14 + .trunk/trunk.yaml | 39 ++ .vscode/settings.json | 2 + cmd/config/encrypt/main.go | 46 ++ cmd/pg/monitor/main.go | 199 +++++++ configs/config.yaml | 47 ++ go.mod | 104 ++++ go.sum | 263 +++++++++ internal/config/config.go | 419 ++++++++++++++ internal/config/db.go | 79 +++ internal/config/pubsub.go | 98 ++++ internal/config/rate_limiter.go | 29 + internal/config/servers.go | 105 ++++ internal/config/validator.go | 37 ++ internal/constants/config.go | 40 ++ internal/logger/adapter/adapter.go | 538 ++++++++++++++++++ internal/logger/adapter/ewrap.go | 16 + internal/logger/config.go | 58 ++ internal/logger/logger.go | 97 ++++ internal/logger/output/compression.go | 184 ++++++ internal/logger/output/output.go | 537 +++++++++++++++++ internal/logger/output/types.go | 10 + internal/repository/pg/db.go | 266 +++++++++ internal/repository/pg/monitor.go | 389 +++++++++++++ internal/repository/pg/params.go | 35 ++ internal/secrets/encryption/encryption.go | 262 +++++++++ internal/secrets/manager.go | 97 ++++ internal/secrets/providers/aws/provider.go | 174 ++++++ internal/secrets/providers/azure/provider.go | 214 +++++++ .../providers/dotenv/encrypted_provider.go | 145 +++++ internal/secrets/providers/dotenv/provider.go | 151 +++++ internal/secrets/providers/gcp/provider.go | 221 +++++++ internal/secrets/providers/vault/provider.go | 200 +++++++ internal/secrets/types.go | 50 ++ 39 files changed, 5189 insertions(+) create mode 100644 .env-example create mode 100644 .trunk/.gitignore create mode 100644 .trunk/configs/.hadolint.yaml create mode 100644 .trunk/configs/.markdownlint.yaml create mode 
100644 .trunk/configs/.shellcheckrc create mode 100644 .trunk/configs/svgo.config.js create mode 100644 .trunk/trunk.yaml create mode 100644 cmd/config/encrypt/main.go create mode 100644 cmd/pg/monitor/main.go create mode 100644 configs/config.yaml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/config/config.go create mode 100644 internal/config/db.go create mode 100644 internal/config/pubsub.go create mode 100644 internal/config/rate_limiter.go create mode 100644 internal/config/servers.go create mode 100644 internal/config/validator.go create mode 100644 internal/constants/config.go create mode 100644 internal/logger/adapter/adapter.go create mode 100644 internal/logger/adapter/ewrap.go create mode 100644 internal/logger/config.go create mode 100644 internal/logger/logger.go create mode 100644 internal/logger/output/compression.go create mode 100644 internal/logger/output/output.go create mode 100644 internal/logger/output/types.go create mode 100644 internal/repository/pg/db.go create mode 100644 internal/repository/pg/monitor.go create mode 100644 internal/repository/pg/params.go create mode 100644 internal/secrets/encryption/encryption.go create mode 100644 internal/secrets/manager.go create mode 100644 internal/secrets/providers/aws/provider.go create mode 100644 internal/secrets/providers/azure/provider.go create mode 100644 internal/secrets/providers/dotenv/encrypted_provider.go create mode 100644 internal/secrets/providers/dotenv/provider.go create mode 100644 internal/secrets/providers/gcp/provider.go create mode 100644 internal/secrets/providers/vault/provider.go create mode 100644 internal/secrets/types.go diff --git a/.env-example b/.env-example new file mode 100644 index 0000000..b4ab1f8 --- /dev/null +++ b/.env-example @@ -0,0 +1,2 @@ +BASE_DB_USERNAME= +BASE_DB_PASSWORD= diff --git a/.trunk/.gitignore b/.trunk/.gitignore new file mode 100644 index 0000000..15966d0 --- /dev/null +++ b/.trunk/.gitignore @@ -0,0 +1,9 @@ 
+*out +*logs +*actions +*notifications +*tools +plugins +user_trunk.yaml +user.yaml +tmp diff --git a/.trunk/configs/.hadolint.yaml b/.trunk/configs/.hadolint.yaml new file mode 100644 index 0000000..98bf0cd --- /dev/null +++ b/.trunk/configs/.hadolint.yaml @@ -0,0 +1,4 @@ +# Following source doesn't work in most setups +ignored: + - SC1090 + - SC1091 diff --git a/.trunk/configs/.markdownlint.yaml b/.trunk/configs/.markdownlint.yaml new file mode 100644 index 0000000..b40ee9d --- /dev/null +++ b/.trunk/configs/.markdownlint.yaml @@ -0,0 +1,2 @@ +# Prettier friendly markdownlint config (all formatting rules disabled) +extends: markdownlint/style/prettier diff --git a/.trunk/configs/.shellcheckrc b/.trunk/configs/.shellcheckrc new file mode 100644 index 0000000..8c7b1ad --- /dev/null +++ b/.trunk/configs/.shellcheckrc @@ -0,0 +1,7 @@ +enable=all +source-path=SCRIPTDIR +disable=SC2154 + +# If you're having issues with shellcheck following source, disable the errors via: +# disable=SC1090 +# disable=SC1091 diff --git a/.trunk/configs/svgo.config.js b/.trunk/configs/svgo.config.js new file mode 100644 index 0000000..b257d13 --- /dev/null +++ b/.trunk/configs/svgo.config.js @@ -0,0 +1,14 @@ +module.exports = { + plugins: [ + { + name: "preset-default", + params: { + overrides: { + removeViewBox: false, // https://github.com/svg/svgo/issues/1128 + sortAttrs: true, + removeOffCanvasPaths: true, + }, + }, + }, + ], +}; diff --git a/.trunk/trunk.yaml b/.trunk/trunk.yaml new file mode 100644 index 0000000..be7fdc1 --- /dev/null +++ b/.trunk/trunk.yaml @@ -0,0 +1,39 @@ +# This file controls the behavior of Trunk: https://docs.trunk.io/cli +# To learn more about the format of this file, see https://docs.trunk.io/reference/trunk-yaml +version: 0.1 +cli: + version: 1.22.7 +# Trunk provides extensibility via plugins. 
(https://docs.trunk.io/plugins) +plugins: + sources: + - id: trunk + ref: v1.6.6 + uri: https://github.com/trunk-io/plugins +# Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes) +runtimes: + enabled: + - go@1.21.0 + - node@18.20.5 + - python@3.10.8 +# This is the section where you manage your linters. (https://docs.trunk.io/check/configuration) +lint: + enabled: + - checkov@3.2.334 + - git-diff-check + - gofmt@1.20.4 + - golangci-lint@1.62.2 + - hadolint@2.12.1-beta + - markdownlint@0.43.0 + - prettier@3.4.2 + - shellcheck@0.10.0 + - shfmt@3.6.0 + - svgo@3.3.2 + - trufflehog@3.86.1 + - yamllint@1.35.1 +actions: + disabled: + - trunk-announce + - trunk-check-pre-push + - trunk-fmt-pre-commit + enabled: + - trunk-upgrade-available diff --git a/.vscode/settings.json b/.vscode/settings.json index 53174ef..4ec3985 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -30,6 +30,7 @@ "errname", "errorlint", "esbenp", + "ewrap", "fatcontext", "fieldalignment", "FLUSHALL", @@ -124,6 +125,7 @@ "varnamelen", "wastedassign", "wrapcheck", + "Wrapf", "zerologlint" ] } \ No newline at end of file diff --git a/cmd/config/encrypt/main.go b/cmd/config/encrypt/main.go new file mode 100644 index 0000000..d3e86f7 --- /dev/null +++ b/cmd/config/encrypt/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + "log/slog" + "os" + + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/base/internal/secrets" + "github.com/hyp3rd/base/internal/secrets/providers/dotenv" +) + +const ( + sourceEnvFile = ".env" + encryptedEnvFile = ".env.encrypted" +) + +func main() { + encryptionPassword, ok := os.LookupEnv("SECRETS_ENCRYPTION_PASSWORD") + if !ok { + fmt.Fprintf(os.Stderr, "SECRETS_ENCRYPTION_PASSWORD environment variable not set\n") + os.Exit(1) + } + + // Initialize the encrypted provider + secretsProviderCfg := secrets.Config{ + Source: secrets.EnvFile, + Prefix: constants.EnvPrefix.String(), + EnvPath: 
encryptedEnvFile, + } + + provider, err := dotenv.NewEncrypted(secretsProviderCfg, encryptionPassword) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to initiate the configuration encryption provider: %v\n", err) + os.Exit(1) + } + + // Encrypt the existing .env file + err = provider.EncryptFile(sourceEnvFile, encryptedEnvFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to encrypt the .env provided: %v\n", err) + os.Exit(1) + } + + slog.Info("Encryption complete") +} diff --git a/cmd/pg/monitor/main.go b/cmd/pg/monitor/main.go new file mode 100644 index 0000000..0a27098 --- /dev/null +++ b/cmd/pg/monitor/main.go @@ -0,0 +1,199 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/hyp3rd/base/internal/config" + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/base/internal/logger" + "github.com/hyp3rd/base/internal/logger/adapter" + "github.com/hyp3rd/base/internal/logger/output" + "github.com/hyp3rd/base/internal/repository/pg" + "github.com/hyp3rd/base/internal/secrets" + "github.com/hyp3rd/base/internal/secrets/providers/dotenv" +) + +const ( + maxLogSize = 10 * 1024 * 1024 // 10 MB + logsDir = "logs/pg/monitor" + logsFile = "pg-monitor.log" + + configFileName = "config" + + monitorInterval = 10 * time.Second +) + +func main() { + ctx := context.Background() + + cfg := initConfig(ctx) + log, multiWriter := initLogger(ctx, cfg.Environment) + // Ensure proper cleanup with detailed error handling + defer func() { + if err := multiWriter.Sync(); err != nil { + fmt.Fprintf(os.Stderr, "Logger sync failed: %+v\n", err) + } + + if err := multiWriter.Close(); err != nil { + fmt.Fprintf(os.Stderr, "Writer cleanup failed: %+v\n", err) + } + }() + + log.Info("Database monitor starting") + + dbManager := initDBmanager(ctx, cfg, log) + + // Create monitor with 1 second slow query threshold + monitor := dbManager.NewMonitor(time.Second) + + // Start monitoring + monitor.Start(ctx) + defer 
monitor.Stop() + + // Create a ticker for periodic checks + ticker := time.NewTicker(monitorInterval) + defer ticker.Stop() + + // Setup signal handling + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) + + // Main process loop + for { + select { + case <-ticker.C: + status := monitor.GetHealthStatus() + if !status.Connected { + log.Error("Database connection lost!") + } else { + log.Info("Database connection is healthy") + } + + if status.PoolStats != nil { + if status.PoolStats.SlowQueries > 0 { + log.Warn("Detected slow queries") + } + } + + case sig := <-sigChan: + log.Infof("Received signal: %v, shutting down...", sig) + + return + case <-ctx.Done(): + log.Info("Context cancelled, shutting down...") + + return + } + } +} + +func initConfig(ctx context.Context) *config.Config { + // Initialize the encrypted provider + secretsProviderCfg := secrets.Config{ + Source: secrets.EnvFile, + Prefix: constants.EnvPrefix.String(), + EnvPath: ".env.encrypted", + } + + encryptionPassword, ok := os.LookupEnv("SECRETS_ENCRYPTION_PASSWORD") + if !ok { + fmt.Fprintf(os.Stderr, "SECRETS_ENCRYPTION_PASSWORD environment variable not set\n") + os.Exit(1) + } + + secretsProvider, err := dotenv.NewEncrypted(secretsProviderCfg, encryptionPassword) + if err != nil { + fmt.Fprintf(os.Stderr, "Secrets provider: %+v\n", err) + os.Exit(1) + } + + // Configure options for config initialization + opts := config.Options{ + ConfigName: configFileName, + SecretsProvider: secretsProvider, + Timeout: constants.DefaultTimeout, + } + + cfg, err := config.NewConfig(ctx, opts) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to initialize config: %v\n", err) + os.Exit(1) + } + + return cfg +} + +func initLogger(_ context.Context, environment string) (logger.Logger, *output.MultiWriter) { + //nolint:mnd + if err := os.MkdirAll(logsDir, 0o755); err != nil { + fmt.Fprintf(os.Stderr, "Failed to create log directory: %v\n", err) + os.Exit(1) + } + + // 
Create file writer with proper error handling + fileWriter, err := output.NewFileWriter(output.FileConfig{ + Path: logsDir + "/" + logsFile, + MaxSize: maxLogSize, + Compress: true, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create file writer: %v\n", err) + os.Exit(1) + } + + // Create console writer + consoleWriter := output.NewConsoleWriter(os.Stdout, output.ColorModeAuto) + + // Create multi-writer with error handling + multiWriter, err := output.NewMultiWriter(consoleWriter, fileWriter) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create multi-writer: %v\n", err) + fileWriter.Close() // Clean up the file writer + os.Exit(1) + } + + // Initialize the logger + loggerCfg := logger.DefaultConfig() + loggerCfg.Output = multiWriter + loggerCfg.EnableJSON = true + loggerCfg.TimeFormat = time.RFC3339 + loggerCfg.EnableCaller = true + loggerCfg.Level = logger.DebugLevel + loggerCfg.AdditionalFields = []logger.Field{ + {Key: "service", Value: "database-monitor"}, + {Key: "environment", Value: environment}, + } + + // Create the logger + log, err := adapter.NewAdapter(loggerCfg) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create logger: %+v\n", err) + os.Exit(1) + } + + return log, multiWriter +} + +func initDBmanager(ctx context.Context, cfg *config.Config, log logger.Logger) *pg.Manager { + // Initialize the database manager + dbManager := pg.New(&cfg.DB, log) + + err := dbManager.Connect(ctx) + if err != nil { + log.Error("Failed to connect to database") + panic(err) + } + + if dbManager.IsConnected(ctx) { + log.Info("Database connection successfully established") + } else { + log.Error("Database connection wasn't established") + } + + return dbManager +} diff --git a/configs/config.yaml b/configs/config.yaml new file mode 100644 index 0000000..ff79e9f --- /dev/null +++ b/configs/config.yaml @@ -0,0 +1,47 @@ +--- +# development | production | local +environment: "development" +servers: + query_api: + port: 8000 + read_timeout: 15s + 
write_timeout: 15s + shutdown_timeout: 5s + grpc: + port: 50051 + max_connection_idle: 15m + max_connection_age: 30m + max_connection_age_grace: 5m + keepalive_time: 5m + keepalive_timeout: 20s + +rate_limiter: + requests_per_second: 100 + burst_size: 50 + +db: + host: + port: "5432" + database: postgres + # session | transaction + pool_mode: "transaction" + max_open_conns: 25 + max_idle_conns: 25 + conn_max_lifetime: 5m + conn_attempts: 5 + conn_timeout: 2s + +pubsub: + project_id: "local-project" + topic_id: "fingerprints" + subscription_id: "base-sub" + emulator_host: "localhost:8085" # For local development + ack_deadline: 30s + subscription: + receive_max_outstanding_messages: 10 + receive_num_goroutines: 4 + receive_max_extension: 30s + retry_policy: + max_attempts: 5 + minimum_backoff: 10s + maximum_backoff: 600s diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..5530732 --- /dev/null +++ b/go.mod @@ -0,0 +1,104 @@ +module github.com/hyp3rd/base + +go 1.23.4 + +require ( + cloud.google.com/go/secretmanager v1.14.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0 + github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7 + github.com/hashicorp/vault/api v1.15.0 + github.com/hyp3rd/ewrap v1.0.3 + github.com/jackc/pgx/v5 v5.7.1 + github.com/joho/godotenv v1.5.1 + github.com/rs/zerolog v1.33.0 + github.com/spf13/viper v1.19.0 + golang.org/x/crypto v0.31.0 + google.golang.org/api v0.211.0 + google.golang.org/grpc v1.69.0 +) + +require ( + cloud.google.com/go/auth v0.12.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.47 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect + github.com/aws/smithy-go v1.22.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.7 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + 
github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/magiconair/properties v1.8.9 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.8.0 // indirect + google.golang.org/genproto v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/protobuf v1.35.2 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..b119742 --- /dev/null +++ b/go.sum @@ -0,0 +1,263 @@ +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.12.1 h1:n2Bj25BUMM0nvE9D2XLTiImanwZhO3DkfWSYS/SAJP4= +cloud.google.com/go/auth v0.12.1/go.mod h1:BFMu+TNpF3DmvfBO9ClqTR/SiqVIm7LukKF9mbendF4= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.3.0 h1:4Wo2qTaGKFtajbLpF6I4mywg900u3TLlHDb6mriLDPU= +cloud.google.com/go/iam v1.3.0/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/secretmanager v1.14.2 h1:2XscWCfy//l/qF96YE18/oUaNJynAx749Jg3u0CjQr8= +cloud.google.com/go/secretmanager v1.14.2/go.mod h1:Q18wAPMM6RXLC/zVpWTlqq2IBSbbm7pKBlM3lCKsmjw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0 h1:WLUIpeyv04H0RCcQHaA4TNoyrQ39Ox7V+re+iaqzTe0= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.3.0/go.mod h1:hd8hTTIY3VmUVPRHNH7GVCHO3SHgXkJKZHReby/bnUQ= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0 h1:eXnN9kaS8TiDwXjoie3hMRLuwdUBUMW9KRgOqB3mCaw= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.1.0/go.mod h1:XIpam8wumeZ5rVMuhdDQLMfIPDf1WO3IzrCRO3e3e3o= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 
h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7 h1:Nyfbgei75bohfmZNxgN27i528dGYVzqWJGlAO6lzXy8= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7/go.mod h1:FG4p/DciRxPgjA+BEOlwRHN0iA8hX2h9g5buSy3cTDA= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 
h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.8.0 
h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 
h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw= +github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= +github.com/hyp3rd/ewrap v1.0.3 h1:hiokZuAy7CJk+Jbh93/Pp/uUIKAALkyuTv9VKL6jNF0= +github.com/hyp3rd/ewrap v1.0.3/go.mod h1:2peK6jpQEuW0aDbDB+SBRA6WRZuFRjdTwRHMyiv+Fv4= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod 
h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= +github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod 
h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero 
v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod 
h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod 
h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +google.golang.org/api v0.211.0 h1:IUpLjq09jxBSV1lACO33CGY3jsRcbctfGzhj+ZSE/Bg= +google.golang.org/api v0.211.0/go.mod h1:XOloB4MXFH4UTlQSGuNUxw0UT74qdENK8d6JNsXKLi0= +google.golang.org/genproto v0.0.0-20241209162323-e6fa225c2576 h1:k48HcZ4FE6in0o8IflZCkc1lTc2u37nhGd8P+fo4r24= +google.golang.org/genproto v0.0.0-20241209162323-e6fa225c2576/go.mod h1:DV2u3tCn/AcVjjmGYZKt6HyvY4w4y3ipAdHkMbe/0i4= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= 
+google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 0000000..04a9957 --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,419 @@ +package config + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "sync" + "time" + + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/base/internal/secrets" + "github.com/hyp3rd/ewrap/pkg/ewrap" + "github.com/spf13/viper" +) + +// Config represents the application configuration, which is loaded from a YAML file +// 
and secrets providers. It contains various configuration options for the servers, +// rate limiter, database, pub/sub, and sensitive credentials. +type Config struct { + Environment string `mapstructure:"environment"` + Servers ServersConfig `mapstructure:"servers"` + RateLimiter RateLimiterConfig `mapstructure:"rate_limiter"` + DB DBConfig `mapstructure:"db"` + PubSub PubSubConfig `mapstructure:"pubsub"` + Secrets *secrets.Store `mapstructure:"-"` // Secrets are handled separately + + mu sync.RWMutex + // rotationCallbacks holds functions to be called after secret rotation + rotationCallbacks []RotationCallback + // secretsManager holds the reference to our secrets manager + secretsManager *secrets.Manager +} + +// RotationCallback is a function that gets called after secrets are rotated. +type RotationCallback func(ctx context.Context, oldSecrets, newSecrets *secrets.Store) error + +// Options holds configuration options for initializing the Config. +type Options struct { + // ConfigName is the name of the configuration file (without extension). + ConfigName string + // SecretsProvider is the interface for accessing secrets. + SecretsProvider secrets.Provider + // Timeout for secrets operations. + Timeout time.Duration +} + +// DefaultOptions returns the default configuration options. +func DefaultOptions() Options { + return Options{ + ConfigName: "config", + // Context: context.Background(), + Timeout: constants.DefaultTimeout, + } +} + +// NewConfig loads the application configuration from a YAML file, environment variables, +// and secrets provider. It validates the configuration before returning. 
+func NewConfig(ctx context.Context, opts Options) (*Config, error) { + // Use default options if not specified + if opts.ConfigName == "" { + opts.ConfigName = DefaultOptions().ConfigName + } + + if opts.Timeout == 0 { + opts.Timeout = DefaultOptions().Timeout + } + + // Initialize viper configuration + viper.SetConfigName(opts.ConfigName) + viper.SetConfigType("yaml") + viper.AddConfigPath(".") + viper.AddConfigPath("./configs") + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err != nil { + var configFileNotFoundError viper.ConfigFileNotFoundError + if !errors.As(err, &configFileNotFoundError) { + return nil, ewrap.Wrapf(err, "reading config file") + } + } + + // Set defaults after reading config but before unmarshaling + setDefaults() + + // Create base configuration + var cfg Config + if err := viper.Unmarshal(&cfg); err != nil { + return nil, ewrap.Wrapf(err, "unmarshaling config") + } + + // Initialize secrets if a provider is specified + if opts.SecretsProvider != nil { + if err := cfg.initializeSecrets(ctx, opts); err != nil { + return nil, ewrap.Wrapf(err, "initializing secrets") + } + } + + // Initialize DB DSN + cfg.DB.BuildDSN() + + // Validate the complete configuration + if err := validateConfig(&cfg); err != nil { + return nil, ewrap.Wrap(err, "validating configuration") + } + + return &cfg, nil +} + +// initializeSecrets loads secrets from the provided secrets provider. 
+func (c *Config) initializeSecrets(ctx context.Context, opts Options) error { + ctx, cancel := context.WithTimeout(ctx, opts.Timeout) + defer cancel() + + // Create secrets manager + manager := secrets.NewManager(opts.SecretsProvider) + + // Load secrets + if err := manager.Load(ctx); err != nil { + return ewrap.Wrapf(err, "loading secrets") + } + + // Store the secrets + c.Secrets = manager.GetStore() + + // Update configuration with secret values + if err := c.applySecrets(); err != nil { + return ewrap.Wrapf(err, "applying secrets to configuration") + } + + return nil +} + +// applySecrets updates the configuration with values from the secrets store. +func (c *Config) applySecrets() error { + if c.Secrets == nil { + return ewrap.New("secrets are empty") + } + + // Apply database credentials + if c.Secrets.DBCredentials.Username != "" { + c.DB.Username = c.Secrets.DBCredentials.Username + } + + if c.Secrets.DBCredentials.Password != "" { + c.DB.Password = c.Secrets.DBCredentials.Password + } + + return nil +} + +func setDefaults() { + // QueryAPI defaults + viper.SetDefault("servers.query_api.port", constants.QueryAPIPort) + viper.SetDefault("servers.query_api.read_timeout", constants.QueryAPIReadTimeout) + viper.SetDefault("servers.query_api.write_timeout", constants.QueryAPIWriteTimeout) + viper.SetDefault("servers.query_api.shutdown_timeout", constants.QueryAPIShutdownTimeout) + + // gRPC defaults + viper.SetDefault("servers.grpc.port", constants.GRPCServerPort) + viper.SetDefault("servers.grpc.max_connection_idle", constants.GRPCServerMaxConnectionIdle) + viper.SetDefault("servers.grpc.max_connection_age", constants.GRPCServerMaxConnectionAge) + viper.SetDefault("servers.grpc.max_connection_age_grace", constants.GRPCServerMaxConnectionAgeGrace) + viper.SetDefault("servers.grpc.keepalive_time", constants.GRPCServerKeepaliveTime) + viper.SetDefault("servers.grpc.keepalive_timeout", constants.GRPCServerKeepaliveTimeout) + + // DB defaults + 
viper.SetDefault("db.max_open_conns", constants.DBMaxOpenConns) + viper.SetDefault("db.max_idle_conns", constants.DBMaxIdleConns) + viper.SetDefault("db.conn_max_lifetime", constants.DBConnMaxLifetime) + + // PubSub defaults + viper.SetDefault("pubsub.ack_deadline", constants.PubSubAckDeadline) + viper.SetDefault("pubsub.retry_policy.minimum_backoff", constants.PubSubRetryPolicyMinimumBackoff) + viper.SetDefault("pubsub.retry_policy.maximum_backoff", constants.PubSubRetryPolicyMaximumBackoff) + viper.SetDefault("pubsub.rate_limit.requests_per_second", constants.PubSubRateLimitRequestsPerSecond) + viper.SetDefault("pubsub.rate_limit.burst_size", constants.PubSubRateLimitBurstSize) +} + +func validateConfig(cfg *Config) error { + validator := NewValidator() + + return validator.Validate(&cfg.Servers, + &cfg.RateLimiter, + &cfg.DB, + &cfg.PubSub) +} + +// RegisterRotationCallback adds a callback to be executed after secret rotation. +func (c *Config) RegisterRotationCallback(callback RotationCallback) { + c.mu.Lock() + defer c.mu.Unlock() + c.rotationCallbacks = append(c.rotationCallbacks, callback) +} + +// ReloadSecrets refreshes all secrets from the provider. 
+func (c *Config) ReloadSecrets(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.secretsManager == nil { + return ewrap.New("secrets manager not initialized") + } + + // Store old secrets for callbacks + oldSecrets := c.Secrets + + // Create a new manager to load fresh secrets + if err := c.secretsManager.Load(ctx); err != nil { + return ewrap.Wrapf(err, "reloading secrets") + } + + // Get the fresh secrets + newSecrets := c.secretsManager.GetStore() + c.Secrets = newSecrets + + // Apply the new secrets to configuration + if err := c.applySecrets(); err != nil { + // Rollback on failure + c.Secrets = oldSecrets + + return ewrap.Wrapf(err, "applying reloaded secrets") + } + + // Execute rotation callbacks + for _, callback := range c.rotationCallbacks { + if err := callback(ctx, oldSecrets, newSecrets); err != nil { + // Log error but continue with other callbacks + // You might want to handle this differently based on your requirements + c.logRotationCallbackError(err, callback) + } + } + + return nil +} + +func (c *Config) logRotationCallbackError(err error, callback RotationCallback) { + // Log error but continue with other callbacks +} + +// RotateSecrets performs a full secret rotation +func (c *Config) RotateSecrets(ctx context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.secretsManager == nil { + return ewrap.New("secrets manager not initialized") + } + + // Store old secrets for potential rollback and callbacks + oldSecrets := c.Secrets + + // Create rotation context with timeout + rotationCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + + // Start the rotation process + newSecrets, err := c.performRotation(rotationCtx) + if err != nil { + return err + } + + // Update current secrets + c.Secrets = newSecrets + + // Apply the new secrets to configuration + if err := c.applySecrets(); err != nil { + // Rollback on failure + c.Secrets = oldSecrets + c.secretsManager.SetStore(oldSecrets) + + 
return ewrap.Wrapf(err, "applying rotated secrets") + } + + // Execute rotation callbacks + return c.executeRotationCallbacks(ctx, oldSecrets, newSecrets) +} + +// performRotation handles the actual secret rotation process with proper verification +// and atomic updates. It generates new credentials, verifies them, and ensures +// a safe transition from old to new secrets. +func (c *Config) performRotation(ctx context.Context) (*secrets.Store, error) { + // Create a new secrets store that will hold our rotated secrets + newSecrets := &secrets.Store{} + + // Track our progress for potential rollback + var completedRotations []string + + // Generate and store new database credentials + if err := c.rotateDatabaseCredentials(ctx, newSecrets); err != nil { + return nil, c.handleRotationFailure(ctx, completedRotations, err) + } + + completedRotations = append(completedRotations, "database") + + // Perform other rotations here to follow. + + completedRotations = append(completedRotations, "api_keys") + + return newSecrets, nil +} + +// rotateDatabaseCredentials handles the rotation of database credentials +func (c *Config) rotateDatabaseCredentials(ctx context.Context, newSecrets *secrets.Store) error { + // Generate new secure credentials + username, err := generateSecureString(32) + if err != nil { + return ewrap.Wrapf(err, "generating new username") + } + + password, err := generateSecureString(64) + if err != nil { + return ewrap.Wrapf(err, "generating new password") + } + + // Store the new credentials temporarily + newSecrets.DBCredentials.Username = username + newSecrets.DBCredentials.Password = password + + // Create metadata for the rotation + metadata := map[string]string{ + "rotated_at": time.Now().UTC().Format(time.RFC3339), + "reason": "scheduled_rotation", + } + + // Store new credentials in the secrets provider with metadata + if err := c.storeDBCredentials(ctx, username, password, metadata); err != nil { + return ewrap.Wrapf(err, "storing new database 
credentials") + } + + // Verify the new credentials work + if err := c.verifyDBCredentials(ctx, username, password); err != nil { + return ewrap.Wrapf(err, "verifying new database credentials") + } + + return nil +} + +// handleRotationFailure attempts to rollback any completed rotations +func (c *Config) handleRotationFailure(ctx context.Context, completedRotations []string, err error) error { + // Create a new context with timeout for rollback operations + rollbackCtx, cancel := context.WithTimeout(ctx, constants.DefaultTimeout) + defer cancel() + + rollbackErr := c.rollbackRotations(rollbackCtx, completedRotations) + if rollbackErr != nil { + // If rollback fails, wrap both errors together + return ewrap.New("rotation and rollback failed"). + WithMetadata("rotation_error", err). + WithMetadata("rollback_error", rollbackErr) + } + + return ewrap.Wrapf(err, "rotation failed and was rolled back") +} + +// storeDBCredentials stores the new database credentials in the secrets provider +func (c *Config) storeDBCredentials(ctx context.Context, username, password string, metadata map[string]string) error { + // Store username + + if err := c.secretsManager.Provider.SetSecret(ctx, "DB_USERNAME", username); err != nil { + return ewrap.Wrapf(err, "storing username") + } + + // Store password + if err := c.secretsManager.Provider.SetSecret(ctx, "DB_PASSWORD", password); err != nil { + return ewrap.Wrapf(err, "storing password") + } + + return nil +} + +// generateSecureString generates a cryptographically secure random string +func generateSecureString(length int) (string, error) { + bytes := make([]byte, length) + if _, err := rand.Read(bytes); err != nil { + return "", ewrap.Wrapf(err, "generating random bytes") + } + + return base64.URLEncoding.EncodeToString(bytes)[:length], nil +} + +// verifyDBCredentials attempts to verify that the new database credentials work +func (c *Config) verifyDBCredentials(ctx context.Context, username, password string) error { + // 
Implementation would depend on your database setup + // Example pseudo-code: + // db, err := sql.Open("postgres", fmt.Sprintf("user=%s password=%s", username, password)) + // if err != nil { + // return ewrap.Wrapf(err, "opening test connection") + // } + // defer db.Close() + // return db.PingContext(ctx) + return nil // TODO: Implement actual verification +} + +// rollbackRotations attempts to restore the previous state for completed rotations +func (c *Config) rollbackRotations(ctx context.Context, completedRotations []string) error { + // Implementation would restore the old secrets for each completed rotation + // This would vary based on your specific requirements and setup + return nil // TODO: Implement actual rollback logic +} + +func (c *Config) executeRotationCallbacks(ctx context.Context, oldSecrets, newSecrets *secrets.Store) error { + var errs []error + + // Execute all callbacks + for _, callback := range c.rotationCallbacks { + if err := callback(ctx, oldSecrets, newSecrets); err != nil { + errs = append(errs, err) + } + } + + // If any callbacks failed, return a combined error + if len(errs) > 0 { + return ewrap.New("one or more rotation callbacks failed"). + WithMetadata("errors", errs) + } + + return nil +} diff --git a/internal/config/db.go b/internal/config/db.go new file mode 100644 index 0000000..219eb08 --- /dev/null +++ b/internal/config/db.go @@ -0,0 +1,79 @@ +package config + +import ( + "strings" + "time" + + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// implement the validatable interface. +var _ validatable = (*DBConfig)(nil) + +// DBConfig holds the SQL databases configuration across the system. 
+type DBConfig struct { + DSN string `mapstructure:"dsn"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + Host string `mapstructure:"host"` + Port string `mapstructure:"port"` + Database string `mapstructure:"database"` + PoolMode string `mapstructure:"pool_mode"` + MaxOpenConns int32 `mapstructure:"max_open_conns"` + MaxIdleConns int32 `mapstructure:"max_idle_conns"` + ConnMaxLifetime time.Duration `mapstructure:"conn_max_lifetime"` + ConnAttempts int `mapstructure:"conn_attempts"` + ConnTimeout time.Duration `mapstructure:"conn_timeout"` +} + +func (c *DBConfig) BuildDSN() { + builder := strings.Builder{} + builder.WriteString("postgresql://") + builder.WriteString(c.Username) + builder.WriteString(":") + builder.WriteString(c.Password) + builder.WriteString("@") + builder.WriteString(c.Host) + builder.WriteString(":") + builder.WriteString(c.Port) + builder.WriteString("/") + builder.WriteString(c.Database) + + c.DSN = builder.String() +} + +// Validate checks the validity of the DBConfig struct and returns an ErrorGroup +// containing any configuration errors found. 
+func (c *DBConfig) Validate(eg *ewrap.ErrorGroup) { + if c.DSN == "" { + eg.Add(ewrap.New("database DSN is required")) + } + + if c.MaxOpenConns <= 0 { + eg.Add(ewrap.New("invalid max open connections").WithMetadata("max_open_conns", c.MaxOpenConns)) + } + + if c.MaxIdleConns <= 0 { + eg.Add(ewrap.New("invalid max idle connections").WithMetadata("max_idle_conns", c.MaxIdleConns)) + } + + if c.ConnMaxLifetime <= 0 { + eg.Add(ewrap.New("invalid connection max lifetime").WithMetadata("conn_max_lifetime", c.ConnMaxLifetime)) + } else { + if _, err := time.ParseDuration(c.ConnMaxLifetime.String()); err != nil { + eg.Add(ewrap.New("invalid connection max lifetime").WithMetadata("conn_max_lifetime", c.ConnMaxLifetime)) + } + } + + if c.ConnAttempts <= 0 { + eg.Add(ewrap.New("invalid connection attempts").WithMetadata("conn_attempts", c.ConnAttempts)) + } + + if c.ConnTimeout <= 0 { + eg.Add(ewrap.New("invalid connection timeout").WithMetadata("conn_timeout", c.ConnTimeout)) + } else { + if _, err := time.ParseDuration(c.ConnTimeout.String()); err != nil { + eg.Add(ewrap.New("invalid connection timeout").WithMetadata("conn_timeout", c.ConnTimeout)) + } + } +} diff --git a/internal/config/pubsub.go b/internal/config/pubsub.go new file mode 100644 index 0000000..0fa9208 --- /dev/null +++ b/internal/config/pubsub.go @@ -0,0 +1,98 @@ +package config + +import ( + "time" + + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// implement the validatable interface. +var _ validatable = (*PubSubConfig)(nil) + +// PubSubConfig holds the pubsub (typically GCP) configuration, globally for the system. 
+type PubSubConfig struct { + ProjectID string `mapstructure:"project_id"` + TopicID string `mapstructure:"topic_id"` + SubscriptionID string `mapstructure:"subscription_id"` + EmulatorHost string `mapstructure:"emulator_host"` + AckDeadline time.Duration `mapstructure:"ack_deadline"` + Subscription Subscription `mapstructure:"subscription"` + RetryPolicy RetryPolicy `mapstructure:"retry_policy"` +} + +type Subscription struct { + ReceiveMaxOutstandingMessages int `mapstructure:"receive_max_outstanding_messages"` + ReceiveNumGoroutines int `mapstructure:"receive_num_goroutines"` + ReceiveMaxExtension time.Duration `mapstructure:"receive_max_extension"` +} + +// RetryPolicy holds the retry policy for pubsub messages. +type RetryPolicy struct { + MaxAttempts int `mapstructure:"max_attempts"` + MinimumBackoff time.Duration `mapstructure:"minimum_backoff"` + MaximumBackoff time.Duration `mapstructure:"maximum_backoff"` +} + +// Validate checks the validity of the PubSubConfig and returns an ErrorGroup containing any +// configuration errors. It ensures that either project_id or emulator_host is set, and that +// topic_id and subscription_id are not empty. It also validates the ack_deadline and +// retry_policy configurations. 
+func (c *PubSubConfig) Validate(eg *ewrap.ErrorGroup) { + if c.ProjectID == "" && c.EmulatorHost == "" { + eg.Add(ewrap.New("either project_id or emulator_host is required for PubSub")) + } + + if c.TopicID == "" { + eg.Add(ewrap.New("topic_id is required for PubSub")) + } + + if c.SubscriptionID == "" { + eg.Add(ewrap.New("subscription_id is required for PubSub")) + } + + c.validateAckDeadline(eg) + c.validateSubscription(eg) + c.validateRetryPolicy(eg) +} + +func (c *PubSubConfig) validateAckDeadline(eg *ewrap.ErrorGroup) { + if c.AckDeadline <= 0 { + eg.Add(ewrap.New("invalid pubsub ack_deadline").WithMetadata("ack_deadline", c.AckDeadline)) + } else if _, err := time.ParseDuration(c.AckDeadline.String()); err != nil { + eg.Add(ewrap.New("invalid pubsub ack_deadline").WithMetadata("ack_deadline", c.AckDeadline)) + } +} + +func (c *PubSubConfig) validateSubscription(eg *ewrap.ErrorGroup) { + if c.Subscription.ReceiveMaxOutstandingMessages <= 0 { + eg.Add(ewrap.New("invalid pubsub subscription receive_max_outstanding_messages").WithMetadata("receive_max_outstanding_messages", c.Subscription.ReceiveMaxOutstandingMessages)) + } + + if c.Subscription.ReceiveNumGoroutines <= 0 { + eg.Add(ewrap.New("invalid pubsub subscription receive_num_goroutines").WithMetadata("receive_num_goroutines", c.Subscription.ReceiveNumGoroutines)) + } + + if c.Subscription.ReceiveMaxExtension <= 0 { + eg.Add(ewrap.New("invalid pubsub subscription receive_max_extension").WithMetadata("receive_max_extension", c.Subscription.ReceiveMaxExtension)) + } else if _, err := time.ParseDuration(c.Subscription.ReceiveMaxExtension.String()); err != nil { + eg.Add(ewrap.New("invalid pubsub subscription receive_max_extension").WithMetadata("receive_max_extension", c.Subscription.ReceiveMaxExtension)) + } +} + +func (c *PubSubConfig) validateRetryPolicy(eg *ewrap.ErrorGroup) { + if c.RetryPolicy.MaxAttempts <= 0 || c.RetryPolicy.MaxAttempts > 10 { + eg.Add(ewrap.New("invalid pubsub retry_policy 
max_attempts").WithMetadata("max_attempts", c.RetryPolicy.MaxAttempts)) + } + + if c.RetryPolicy.MinimumBackoff <= 0 { + eg.Add(ewrap.New("invalid pubsub retry_policy minimum_backoff").WithMetadata("minimum_backoff", c.RetryPolicy.MinimumBackoff)) + } else if _, err := time.ParseDuration(c.RetryPolicy.MinimumBackoff.String()); err != nil { + eg.Add(ewrap.New("invalid pubsub retry_policy minimum_backoff").WithMetadata("minimum_backoff", c.RetryPolicy.MinimumBackoff)) + } + + if c.RetryPolicy.MaximumBackoff <= 0 { + eg.Add(ewrap.New("invalid pubsub retry_policy maximum_backoff").WithMetadata("maximum_backoff", c.RetryPolicy.MaximumBackoff)) + } else if _, err := time.ParseDuration(c.RetryPolicy.MaximumBackoff.String()); err != nil { + eg.Add(ewrap.New("invalid pubsub retry_policy maximum_backoff").WithMetadata("maximum_backoff", c.RetryPolicy.MaximumBackoff)) + } +} diff --git a/internal/config/rate_limiter.go b/internal/config/rate_limiter.go new file mode 100644 index 0000000..c2d5920 --- /dev/null +++ b/internal/config/rate_limiter.go @@ -0,0 +1,29 @@ +package config + +import "github.com/hyp3rd/ewrap/pkg/ewrap" + +// implement the validatable interface. +var _ validatable = (*RateLimiterConfig)(nil) + +// RateLimiterConfig holds the rate limiter configuration, globally for the system. +type RateLimiterConfig struct { + RequestsPerSecond int `mapstructure:"requests_per_second"` + BurstSize int `mapstructure:"burst_size"` +} + +// Validate ensures the RateLimiterConfig is valid. It checks that the requests_per_second and burst_size +// values are greater than 0, and that requests_per_second is greater than burst_size. +// If any of these conditions are not met, it adds an error to the provided ErrorGroup. 
+func (c *RateLimiterConfig) Validate(eg *ewrap.ErrorGroup) { + if c.RequestsPerSecond <= 0 { + eg.Add(ewrap.New("rate limiter requests_per_second must be greater than 0")) + } + + if c.BurstSize <= 0 { + eg.Add(ewrap.New("rate limiter burst_size must be greater than 0")) + } + + if c.RequestsPerSecond < c.BurstSize { + eg.Add(ewrap.New("rate limiter requests_per_second must be greater than burst_size")) + } +} diff --git a/internal/config/servers.go b/internal/config/servers.go new file mode 100644 index 0000000..26bd3e8 --- /dev/null +++ b/internal/config/servers.go @@ -0,0 +1,105 @@ +package config + +import ( + "time" + + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// implement the validatable interface. +var _ validatable = (*ServersConfig)(nil) + +// ServersConfig holds the servers configuration across the system. +type ServersConfig struct { + QueryAPI QueryAPIConfig `mapstructure:"query_api"` + GRPC GRPCConfig `mapstructure:"grpc"` +} + +// QueryServerConfig holds the Query API http server configuration. +type QueryAPIConfig struct { + Port int `mapstructure:"port"` + ReadTimeout time.Duration `mapstructure:"read_timeout"` + WriteTimeout time.Duration `mapstructure:"write_timeout"` + ShutdownTimeout time.Duration `mapstructure:"shutdown_timeout"` +} + +// GRPCConfig holds the gRPC servers configuration. +type GRPCConfig struct { + Port int `mapstructure:"port"` + MaxConnectionIdle time.Duration `mapstructure:"max_connection_idle"` + MaxConnectionAge time.Duration `mapstructure:"max_connection_age"` + MaxConnectionAgeGrace time.Duration `mapstructure:"max_connection_age_grace"` + KeepAliveTime time.Duration `mapstructure:"keepalive_time"` + KeepAliveTimeout time.Duration `mapstructure:"keepalive_timeout"` +} + +// Validate validates the ServersConfig by checking the validity of the QueryAPI and GRPC configurations. 
+func (c *ServersConfig) Validate(eg *ewrap.ErrorGroup) {
+	c.validateQueryAPI(eg)
+	c.validateGRPC(eg)
+}
+
+// validPort reports whether port is usable. With privileged=true any port in
+// 1-65535 is accepted; otherwise only the non-privileged range 1024-65535.
+func validPort(port int, privileged bool) bool {
+	if privileged {
+		return port > 0 && port <= 65535
+	}
+
+	return port > 1023 && port <= 65535
+}
+
+// validateQueryAPI checks the Query API HTTP server settings.
+// The timeout fields are time.Duration values, so a positive value is always
+// well-formed; re-parsing Duration.String() with time.ParseDuration can never
+// fail and those checks have been removed as dead code.
+func (c *ServersConfig) validateQueryAPI(eg *ewrap.ErrorGroup) {
+	if !validPort(c.QueryAPI.Port, false) {
+		eg.Add(ewrap.New("query API port must be greater than 1023 and less than 65535"))
+	}
+
+	if c.QueryAPI.ReadTimeout <= 0 {
+		eg.Add(ewrap.New("query API read timeout must be greater than 0"))
+	}
+
+	if c.QueryAPI.WriteTimeout <= 0 {
+		eg.Add(ewrap.New("query API write timeout must be greater than 0"))
+	}
+
+	if c.QueryAPI.ShutdownTimeout <= 0 {
+		eg.Add(ewrap.New("query API shutdown timeout must be greater than 0"))
+	}
+}
+
+// validateGRPC checks the gRPC server settings. As in validateQueryAPI, the
+// duration fields need only a positivity check.
+func (c *ServersConfig) validateGRPC(eg *ewrap.ErrorGroup) {
+	// Bug fix: this previously validated c.QueryAPI.Port, so a bad gRPC port
+	// was never caught (and a bad Query API port was reported twice).
+	if !validPort(c.GRPC.Port, false) {
+		eg.Add(ewrap.New("gRPC port must be greater than 1023 and less than 65535"))
+	}
+
+	if c.GRPC.MaxConnectionIdle <= 0 {
+		eg.Add(ewrap.New("gRPC max connection idle must be greater than 0"))
+	}
+
+	if c.GRPC.MaxConnectionAge <= 0 {
+		eg.Add(ewrap.New("gRPC max connection age must be greater than 0"))
+	}
+
+	if c.GRPC.MaxConnectionAgeGrace <= 0 {
+		eg.Add(ewrap.New("gRPC max connection age grace must be greater than 0"))
+	}
+
+	if c.GRPC.KeepAliveTime <= 0 {
+		eg.Add(ewrap.New("gRPC keepalive time must be greater than 0"))
+	}
+
+	if c.GRPC.KeepAliveTimeout <= 0 {
+		eg.Add(ewrap.New("gRPC keepalive timeout must be greater than 0"))
+	}
+}
diff --git a/internal/config/validator.go b/internal/config/validator.go
new file mode 100644
index 0000000..450da71
--- /dev/null
+++ b/internal/config/validator.go
@@ -0,0 +1,37 @@
+package config
+
+import (
+	"errors"
+
+	"github.com/hyp3rd/ewrap/pkg/ewrap"
+)
+
+// validatable is implemented by every configuration section that can report
+// its validation errors into a shared ErrorGroup.
+type validatable interface {
+	Validate(eg *ewrap.ErrorGroup)
+}
+
+// Validator is a struct that holds an ErrorGroup for collecting validation errors.
+type Validator struct {
+	Errors *ewrap.ErrorGroup
+}
+
+// NewValidator creates a new Validator instance with an empty ErrorGroup.
+func NewValidator() *Validator {
+	return &Validator{
+		Errors: ewrap.NewErrorGroup(),
+	}
+}
+
+// Validate validates the given validatable configurations and returns an error if any of them are invalid.
+// The Validator collects all errors in its Errors field, which can be inspected after calling Validate.
+func (v *Validator) Validate(configs ...validatable) error {
+	for _, c := range configs {
+		c.Validate(v.Errors)
+	}
+
+	if v.Errors.HasErrors() {
+		return errors.Join(v.Errors.Errors()...)
+	}
+
+	return nil
+}
diff --git a/internal/constants/config.go b/internal/constants/config.go
new file mode 100644
index 0000000..0a29a09
--- /dev/null
+++ b/internal/constants/config.go
@@ -0,0 +1,40 @@
+package constants
+
+import "time"
+
+// ConfigEnvKey names an environment variable used for configuration.
+type ConfigEnvKey string
+
+const (
+	// EnvPrefix is the prefix applied to all configuration environment variables.
+	EnvPrefix = ConfigEnvKey("BASE")
+	// DBUsername is the environment variable name for the database username.
+ DBUsername = ConfigEnvKey("DB_USERNAME") + // DBPassword is the environment variable name for the database password. + DBPassword = ConfigEnvKey("DB_PASSWORD") +) + +// String implements the flag.Value interface. +func (k ConfigEnvKey) String() string { + return string(k) +} + +const ( + DefaultTimeout = 30 * time.Second + QueryAPIPort = 8000 + QueryAPIReadTimeout = "15s" + QueryAPIWriteTimeout = "15s" + QueryAPIShutdownTimeout = "5s" + GRPCServerPort = 50051 + GRPCServerMaxConnectionIdle = "15m" + GRPCServerMaxConnectionAge = "30m" + GRPCServerMaxConnectionAgeGrace = "5m" + GRPCServerKeepaliveTime = "5m" + GRPCServerKeepaliveTimeout = "20s" + DBMaxOpenConns = 25 + DBMaxIdleConns = 25 + DBConnMaxLifetime = "5m" + PubSubAckDeadline = "30s" + PubSubRetryPolicyMinimumBackoff = "10s" + PubSubRetryPolicyMaximumBackoff = "600s" + PubSubRateLimitRequestsPerSecond = 100 + PubSubRateLimitBurstSize = 50 +) diff --git a/internal/logger/adapter/adapter.go b/internal/logger/adapter/adapter.go new file mode 100644 index 0000000..f3690ad --- /dev/null +++ b/internal/logger/adapter/adapter.go @@ -0,0 +1,538 @@ +package adapter + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "runtime" + "strings" + "sync" + "time" + + "github.com/hyp3rd/base/internal/logger" + "github.com/hyp3rd/base/internal/logger/output" + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +const ( + callerDepth = 3 + bufferTimeout = 100 * time.Millisecond +) + +// bufferPool maintains a pool of reusable byte buffers to minimize allocations. +// +//nolint:gochecknoglobals +var bufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// adapter implements the Logger interface with high-performance logging. +type adapter struct { + config logger.Config + mu sync.RWMutex + fields []logger.Field + buffer chan logEntry + done chan struct{} + wg *sync.WaitGroup // Change to pointer +} + +// logEntry represents a single log entry. 
+type logEntry struct { + Level logger.Level + Message string + Fields []logger.Field + Timestamp time.Time + Caller string + Error error +} + +// NewAdapter creates a new logger adapter. +func NewAdapter(config logger.Config) (logger.Logger, error) { + if config.Output == nil { + return nil, ewrap.New("output writer is required") + } + + if config.AsyncBufferSize == 0 { + config.AsyncBufferSize = logger.DefaultAsyncBufferSize + } + + wg := new(sync.WaitGroup) // Create WaitGroup pointer + + loggerAdapter := &adapter{ + config: config, + buffer: make(chan logEntry, config.AsyncBufferSize), + done: make(chan struct{}), + wg: wg, // Store pointer + } + + // Start background writer + loggerAdapter.wg.Add(1) + go loggerAdapter.processLogs() + + return loggerAdapter, nil +} + +// processLogs handles the background processing of log entries with proper shutdown. +func (a *adapter) processLogs() { + defer a.wg.Done() + + for { + select { + case entry, ok := <-a.buffer: + if !ok { + // Channel is closed, process any remaining entries + return + } + + a.writeLog(entry) + case <-a.done: + // Process any remaining entries in the buffer + for { + select { + case entry, ok := <-a.buffer: + if !ok { + return + } + + a.writeLog(entry) + default: + // No more entries in buffer + return + } + } + } + } +} + +// writeLog handles the actual writing of log entries with improved error reporting. 
+func (a *adapter) writeLog(entry logEntry) { + if a.config.Output == nil { + return + } + + buf := a.getBuffer() + defer bufferPool.Put(buf) + + a.formatEntry(buf, entry) + a.ensureNewline(buf) + + contents := buf.Bytes() + + a.mu.Lock() + defer a.mu.Unlock() + + switch output := a.config.Output.(type) { + case *output.MultiWriter: + a.handleMultiWriter(output, contents, entry) + default: + a.handleSingleWriter(output, contents, entry) + } +} + +func (a *adapter) getBuffer() *bytes.Buffer { + buf, ok := bufferPool.Get().(*bytes.Buffer) + if !ok { + buf = new(bytes.Buffer) + } + + buf.Reset() + + return buf +} + +func (a *adapter) formatEntry(buf *bytes.Buffer, entry logEntry) { + if a.config.EnableJSON { + a.writeJSONLog(buf, entry) + } else { + a.writeTextLog(buf, entry) + } +} + +func (a *adapter) ensureNewline(buf *bytes.Buffer) { + if buf.Len() > 0 && buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } +} + +func (a *adapter) handleMultiWriter(output *output.MultiWriter, contents []byte, entry logEntry) { + writeResults := a.collectWriteResults(output, contents) + successCount, incompleteWrites, errorWrites := a.analyzeResults(writeResults, contents) + + if len(errorWrites) > 0 || len(incompleteWrites) > 0 { + a.reportWriteIssues(entry, contents, successCount, len(writeResults), incompleteWrites, errorWrites) + } +} + +func (a *adapter) collectWriteResults(mwOutput *output.MultiWriter, contents []byte) []output.WriteResult { + writeResults := make([]output.WriteResult, 0, len(mwOutput.Writers)) + + for _, writer := range mwOutput.Writers { + if writer == nil { + continue + } + + bytesWritten, err := writer.Write(contents) + writeResults = append(writeResults, output.WriteResult{ + Writer: writer, + Name: fmt.Sprintf("%T", writer), + Bytes: bytesWritten, + Err: err, + }) + } + + return writeResults +} + +func (a *adapter) analyzeResults(writeResults []output.WriteResult, contents []byte) (int, []string, []string) { + successCount := 0 + + var 
incompleteWrites, errorWrites []string + + for _, result := range writeResults { + switch { + case result.Err != nil: + errorWrites = append(errorWrites, fmt.Sprintf("%s: error: %v", result.Name, result.Err)) + case result.Bytes != len(contents): + incompleteWrites = append(incompleteWrites, fmt.Sprintf("%s: partial write %d/%d bytes", result.Name, result.Bytes, len(contents))) + default: + successCount++ + } + } + + return successCount, incompleteWrites, errorWrites +} + +func (a *adapter) reportWriteIssues(entry logEntry, contents []byte, successCount, totalWrites int, incompleteWrites, errorWrites []string) { + diagMsg := fmt.Sprintf( + "Write issues detected:\n"+ + " Level: %s\n"+ + " Message: %q\n"+ + " Buffer size: %d bytes\n"+ + " Successful writes: %d/%d", + entry.Level, + entry.Message, + len(contents), + successCount, + totalWrites, + ) + + if len(errorWrites) > 0 { + diagMsg += "\n Errors:\n " + strings.Join(errorWrites, "\n ") + } + + if len(incompleteWrites) > 0 { + diagMsg += "\n Incomplete writes:\n " + strings.Join(incompleteWrites, "\n ") + } + + fmt.Fprintln(os.Stderr, diagMsg) +} + +func (a *adapter) handleSingleWriter(output io.Writer, contents []byte, entry logEntry) { + bytesWritten, err := output.Write(contents) + if err != nil || bytesWritten != len(contents) { + fmt.Fprintf(os.Stderr, + "Write issue detected:\n"+ + " Level: %s\n"+ + " Message: %q\n"+ + " Writer type: %T\n"+ + " Bytes written: %d/%d\n"+ + " Error: %v\n", + entry.Level, + entry.Message, + output, + bytesWritten, + len(contents), + err, + ) + } +} + +// writeJSONLog formats and writes the log entry as JSON. 
+func (a *adapter) writeJSONLog(buf *bytes.Buffer, entry logEntry) { + // Pre-allocate a map with enough capacity for all fields + capacity := len(entry.Fields) + if !a.config.DisableTimestamp { + capacity++ + } + + if entry.Caller != "" { + capacity++ + } + + if entry.Error != nil { + capacity++ + } + + capacity += 2 // level and message are always present + + logMap := make(map[string]interface{}, capacity) + + // Add standard fields + logMap["level"] = entry.Level.String() + logMap["message"] = entry.Message + + if !a.config.DisableTimestamp { + logMap["timestamp"] = entry.Timestamp.Format(a.config.TimeFormat) + } + + if entry.Caller != "" { + logMap["caller"] = entry.Caller + } + + // Add all custom fields + for _, field := range entry.Fields { + logMap[field.Key] = field.Value + } + + // Add any additional fields configured globally + for _, field := range a.config.AdditionalFields { + logMap[field.Key] = field.Value + } + + // Marshal to JSON + encoder := json.NewEncoder(buf) + encoder.SetEscapeHTML(false) + + err := encoder.Encode(logMap) + if err != nil { + buf.WriteString(fmt.Sprintf("failed to marshal log entry to JSON: %s", err)) + } +} + +// writeTextLog formats and writes the log entry as human-readable text. 
+// +//nolint:cyclop +func (a *adapter) writeTextLog(buf *bytes.Buffer, entry logEntry) { + // Write timestamp if enabled + if !a.config.DisableTimestamp { + buf.WriteString(entry.Timestamp.Format(a.config.TimeFormat)) + buf.WriteByte(' ') + } + + // Write log level with fixed width padding + fmt.Fprintf(buf, "%-5s ", entry.Level.String()) + + // Write caller information if available + if entry.Caller != "" { + buf.WriteByte('[') + buf.WriteString(entry.Caller) + buf.WriteString("] ") + } + + // Write the message + buf.WriteString(entry.Message) + + // Write fields if present + if len(entry.Fields) > 0 || len(a.config.AdditionalFields) > 0 { + buf.WriteString(" {") + + // Write custom fields + for i, field := range entry.Fields { + if i > 0 { + buf.WriteString(", ") + } + + writeField(buf, field) + } + + // Write additional fields + if len(entry.Fields) > 0 && len(a.config.AdditionalFields) > 0 { + buf.WriteString(", ") + } + + for i, field := range a.config.AdditionalFields { + if i > 0 { + buf.WriteString(", ") + } + + writeField(buf, field) + } + + buf.WriteByte('}') + } +} + +// writeField formats and writes a single field. +func writeField(buf *bytes.Buffer, field logger.Field) { + buf.WriteString(field.Key) + buf.WriteString("=") + + // Handle different value types + switch val := field.Value.(type) { + case string: + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + case time.Time: + buf.WriteByte('"') + buf.WriteString(val.Format(time.RFC3339)) + buf.WriteByte('"') + case error: + buf.WriteByte('"') + buf.WriteString(val.Error()) + buf.WriteByte('"') + default: + fmt.Fprintf(buf, "%v", val) + } +} + +// WithContext adds contextual information to the log entry. +func (a *adapter) WithContext(ctx context.Context) logger.Logger { + // Extract relevant information from context + // Example: trace IDs, request IDs, etc. + fields := extractContextFields(ctx) + + return a.WithFields(fields...) 
+} + +// WithFields adds additional fields to the log entry. +func (a *adapter) WithFields(fields ...logger.Field) logger.Logger { + a.mu.Lock() + defer a.mu.Unlock() + + newAdapter := &adapter{ + config: a.config, + buffer: a.buffer, + done: a.done, + wg: a.wg, // Share the pointer to WaitGroup + fields: make([]logger.Field, len(a.fields), len(a.fields)+len(fields)), + } + copy(newAdapter.fields, a.fields) + newAdapter.fields = append(newAdapter.fields, fields...) + + return newAdapter +} + +// WithError adds an error field to the log entry. +func (a *adapter) WithError(err error) logger.Logger { + if err == nil { + return a + } + + fields := []logger.Field{ + {Key: "error", Value: err.Error()}, + } + + // If it's our custom error type, extract additional information + if wrappedErr, ok := err.(interface{ StackTrace() string }); ok { + fields = append(fields, logger.Field{ + Key: "stack_trace", + Value: wrappedErr.StackTrace(), + }) + } + + return a.WithFields(fields...) +} + +// log ensures entries are properly handled even during shutdown. +func (a *adapter) log(level logger.Level, msg string) { + if level < a.config.Level { + return + } + + entry := logEntry{ + Level: level, + Message: msg, + Fields: a.fields, + Timestamp: time.Now(), + } + + if a.config.EnableCaller { + entry.Caller = getCaller() + } + + // Try to send to buffer with a timeout + select { + case a.buffer <- entry: + // Successfully queued the entry + case <-time.After(bufferTimeout): + // Buffer full or shutdown in progress, fall back to synchronous write + a.writeLog(entry) + } +} + +func getCaller() string { + _, file, line, ok := runtime.Caller(callerDepth) + if !ok { + return "unknown" + } + + // Trim the file path to the last two directories + parts := strings.Split(file, "/") + //nolint:mnd + if len(parts) > 2 { + file = strings.Join(parts[len(parts)-2:], "/") + } + + return fmt.Sprintf("%s:%d", file, line) +} + +// Implement all the logging methods. 
+func (a *adapter) Trace(msg string) { a.log(logger.TraceLevel, msg) } +func (a *adapter) Debug(msg string) { a.log(logger.DebugLevel, msg) } +func (a *adapter) Info(msg string) { a.log(logger.InfoLevel, msg) } +func (a *adapter) Warn(msg string) { a.log(logger.WarnLevel, msg) } +func (a *adapter) Error(msg string) { a.log(logger.ErrorLevel, msg) } +func (a *adapter) Fatal(msg string) { a.log(logger.FatalLevel, msg) } +func (a *adapter) Tracef(format string, args ...interface{}) { a.Trace(fmt.Sprintf(format, args...)) } +func (a *adapter) Debugf(format string, args ...interface{}) { a.Debug(fmt.Sprintf(format, args...)) } +func (a *adapter) Infof(format string, args ...interface{}) { a.Info(fmt.Sprintf(format, args...)) } +func (a *adapter) Warnf(format string, args ...interface{}) { a.Warn(fmt.Sprintf(format, args...)) } +func (a *adapter) Errorf(format string, args ...interface{}) { a.Error(fmt.Sprintf(format, args...)) } +func (a *adapter) Fatalf(format string, args ...interface{}) { a.Fatal(fmt.Sprintf(format, args...)) } + +// GetLevel returns the current logging level for the adapter. +// This allows controlling the verbosity of the logging output. +func (a *adapter) GetLevel() logger.Level { + a.mu.RLock() + defer a.mu.RUnlock() + + return a.config.Level +} + +// SetLevel sets the logging level for the adapter. This allows controlling the +// verbosity of the logging output. +func (a *adapter) SetLevel(level logger.Level) { + a.mu.Lock() + defer a.mu.Unlock() + a.config.Level = level +} + +// Sync ensures all pending logs are written before shutdown. +func (a *adapter) Sync() error { + // Signal shutdown + close(a.done) + + // Close the buffer channel after signaling shutdown + close(a.buffer) + + // Wait for all pending writes to complete + a.wg.Wait() + + // Sync the underlying writer + if syncer, ok := a.config.Output.(interface{ Sync() error }); ok { + return syncer.Sync() + } + + return nil +} + +// Helper functions to extract context fields. 
+func extractContextFields(ctx context.Context) []logger.Field { + var fields []logger.Field + + // Example: Extract trace ID + if traceID := ctx.Value("trace_id"); traceID != nil { + fields = append(fields, logger.Field{ + Key: "trace_id", + Value: traceID, + }) + } + + return fields +} diff --git a/internal/logger/adapter/ewrap.go b/internal/logger/adapter/ewrap.go new file mode 100644 index 0000000..fea601d --- /dev/null +++ b/internal/logger/adapter/ewrap.go @@ -0,0 +1,16 @@ +package adapter + +import ( + "os" + + "github.com/hyp3rd/ewrap/pkg/ewrap/adapters" + "github.com/rs/zerolog" +) + +// NewZerologAdapter creates a new ZerologAdapter instance that wraps the zerolog logger. +// The zerolog logger is configured to write to stdout and include timestamps. +func NewZerologAdapter() *adapters.ZerologAdapter { + zerologLogger := zerolog.New(os.Stdout).With().Timestamp().Logger() + + return adapters.NewZerologAdapter(zerologLogger) +} diff --git a/internal/logger/config.go b/internal/logger/config.go new file mode 100644 index 0000000..f066943 --- /dev/null +++ b/internal/logger/config.go @@ -0,0 +1,58 @@ +package logger + +import ( + "io" + "os" + "time" +) + +const ( + // DefaultTimeFormat is the default time format for log entries. + DefaultTimeFormat = time.RFC3339 + // DefaultLevel is the default logging level. + DefaultLevel = InfoLevel + // DefaultBufferSize is the default size of the log buffer. + DefaultBufferSize = 4096 + // DefaultAsyncBufferSize is the default size of the async log buffer. + DefaultAsyncBufferSize = 1024 +) + +// Config holds configuration for the logger. 
+type Config struct { + // Level is the minimum level to log + Level Level + // Output is where the logs will be written + Output io.Writer + // EnableStackTrace enables stack trace for error and fatal levels + EnableStackTrace bool + // EnableCaller adds the caller information to log entries + EnableCaller bool + // TimeFormat specifies the format for timestamps + TimeFormat string + // EnableJSON enables JSON output format + EnableJSON bool + // BufferSize sets the size of the log buffer + BufferSize int + // AsyncBufferSize sets the size of the async log buffer + AsyncBufferSize int + // DisableTimestamp disables timestamp in log entries + DisableTimestamp bool + // AdditionalFields adds these fields to all log entries + AdditionalFields []Field +} + +// DefaultConfig returns the default logger configuration. +func DefaultConfig() Config { + return Config{ + // Set a default output destination (os.Stdout) + Output: os.Stdout, + Level: DefaultLevel, + EnableStackTrace: true, + EnableCaller: true, + TimeFormat: DefaultTimeFormat, + EnableJSON: false, // Changed to false for better console readability by default + BufferSize: DefaultBufferSize, + AsyncBufferSize: DefaultAsyncBufferSize, + AdditionalFields: make([]Field, 0), // Initialize empty slice + } +} diff --git a/internal/logger/logger.go b/internal/logger/logger.go new file mode 100644 index 0000000..c80dd94 --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,97 @@ +package logger + +import ( + "context" +) + +// Level represents the severity of a log message. +type Level uint8 + +const ( + // TraceLevel represents verbose debugging information. + TraceLevel Level = iota + // DebugLevel represents debugging information. + DebugLevel + // InfoLevel represents general operational information. + InfoLevel + // WarnLevel represents warning messages. + WarnLevel + // ErrorLevel represents error messages. + ErrorLevel + // FatalLevel represents fatal error messages. 
+ FatalLevel +) + +// String returns the string representation of a log level. +func (l Level) String() string { + switch l { + case TraceLevel: + return "TRACE" + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case FatalLevel: + return "FATAL" + default: + return "UNKNOWN" + } +} + +// Field represents a key-value pair in structured logging. +type Field struct { + Key string + Value interface{} +} + +// Logger defines the interface for logging operations. +type Logger interface { + // Log methods for different levels + Trace(msg string) + Debug(msg string) + Info(msg string) + Warn(msg string) + Error(msg string) + Fatal(msg string) + + // Formatted log methods + FormattedLogger + + Methods +} + +// Methods defines the interface for logging methods. +type Methods interface { + // WithContext adds context information to the logger + WithContext(ctx context.Context) Logger + // WithFields adds structured fields to the logger + WithFields(fields ...Field) Logger + // WithError adds an error to the logger + WithError(err error) Logger + // GetLevel returns the current logging level + GetLevel() Level + // SetLevel sets the logging level + SetLevel(level Level) + // Sync ensures all logs are written + Sync() error +} + +// FormattedLogger defines the interface for logging formatted messages. 
+type FormattedLogger interface { + // Tracef logs a message at the Trace level + Tracef(format string, args ...interface{}) + // Debugf logs a message at the Debug level + Debugf(format string, args ...interface{}) + // Infof logs a message at the Info level + Infof(format string, args ...interface{}) + // Warnf logs a message at the Warn level + Warnf(format string, args ...interface{}) + // Errorf logs a message at the Error level + Errorf(format string, args ...interface{}) + // Fatalf logs a message at the Fatal level + Fatalf(format string, args ...interface{}) +} diff --git a/internal/logger/output/compression.go b/internal/logger/output/compression.go new file mode 100644 index 0000000..ccef58f --- /dev/null +++ b/internal/logger/output/compression.go @@ -0,0 +1,184 @@ +package output + +import ( + "compress/gzip" + "io" + "os" + "path/filepath" + "sync" + + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +const bufferSize = 32 * 1024 // 32KB buffer + +// compressFile compresses the given file using gzip compression. +// The original file is removed after successful compression. +// This method is designed to run in the background to avoid blocking logging operations. +func (w *FileWriter) compressFile(path string) { + // We'll use a WaitGroup to ensure proper cleanup in case of panic + var wg sync.WaitGroup + + wg.Add(1) + + go func() { + defer wg.Done() + defer func() { + if r := recover(); r != nil { + // If panic occurs, ensure we don't leave partial files + cleanupCompression(path) + } + }() + + if err := w.performCompression(path); err != nil { + // Log the error but don't fail - this is a background operation + // In a real application, you might want to send this to an error channel + // or use your error reporting system + _, _ = os.Stderr.WriteString("Error compressing log file: " + err.Error() + "\n") + } + }() + + wg.Wait() +} + +// performCompression handles the actual compression work. 
+func (w *FileWriter) performCompression(path string) error { + // Open the source file + source, err := os.Open(path) + if err != nil { + return ewrap.Wrapf(err, "opening source file"). + WithMetadata("path", path) + } + defer source.Close() + + // Create the compressed file + compressedPath := path + ".gz" + //nolint:mnd + compressed, err := os.OpenFile(compressedPath, os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return ewrap.Wrapf(err, "creating compressed file"). + WithMetadata("path", compressedPath) + } + + defer compressed.Close() + + // Create gzip writer with best compression + gzipWriter, err := gzip.NewWriterLevel(compressed, gzip.BestCompression) + if err != nil { + return ewrap.Wrapf(err, "creating gzip writer") + } + defer gzipWriter.Close() + + // Set the original file name in the gzip header + gzipWriter.Name = filepath.Base(path) + + // Create a buffer for copying + + buffer := make([]byte, bufferSize) + + // Copy the file content in chunks + if err := copyWithBuffer(gzipWriter, source, buffer); err != nil { + // If compression fails, clean up the partial compressed file + os.Remove(compressedPath) + + return ewrap.Wrapf(err, "copying file content") + } + + // Ensure all data is written + if err := gzipWriter.Close(); err != nil { + os.Remove(compressedPath) + + return ewrap.Wrapf(err, "closing gzip writer") + } + + if err := compressed.Sync(); err != nil { + os.Remove(compressedPath) + + return ewrap.Wrapf(err, "syncing compressed file") + } + + if err := compressed.Close(); err != nil { + os.Remove(compressedPath) + + return ewrap.Wrapf(err, "closing compressed file") + } + + // Verify the compressed file exists and has content + if err := verifyCompressedFile(compressedPath); err != nil { + os.Remove(compressedPath) + + return err + } + + // Remove the original file only after successful compression + if err := os.Remove(path); err != nil { + // If we can't remove the original, remove the compressed file to avoid duplicates + 
os.Remove(compressedPath) + + return ewrap.Wrapf(err, "removing original file"). + WithMetadata("path", path) + } + + return nil +} + +// copyWithBuffer copies from src to dst using the provided buffer. +func copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) error { + for { + n, err := src.Read(buf) + if n > 0 { + if _, writerErr := dst.Write(buf[:n]); writerErr != nil { + return ewrap.Wrapf(writerErr, "writing to destination") + } + } + + if err == io.EOF { + break + } + + if err != nil { + return ewrap.Wrapf(err, "reading from source") + } + } + + return nil +} + +// verifyCompressedFile checks if the compressed file exists and has content. +func verifyCompressedFile(path string) error { + info, err := os.Stat(path) + if err != nil { + return ewrap.Wrapf(err, "verifying compressed file"). + WithMetadata("path", path) + } + + if info.Size() == 0 { + return ewrap.New("compressed file is empty"). + WithMetadata("path", path) + } + + // Optional: Verify the file is a valid gzip file + f, err := os.Open(path) + if err != nil { + return ewrap.Wrapf(err, "opening compressed file for verification") + } + defer f.Close() + + gr, err := gzip.NewReader(f) + if err != nil { + return ewrap.Wrapf(err, "verifying gzip format") + } + + gr.Close() + + return nil +} + +// cleanupCompression removes both the original and compressed files +// in case of a critical error or panic during compression. 
+func cleanupCompression(path string) { + // Don't remove the original file in cleanup + // Better to keep uncompressed logs than lose them + compressedPath := path + ".gz" + os.Remove(compressedPath) +} diff --git a/internal/logger/output/output.go b/internal/logger/output/output.go new file mode 100644 index 0000000..76e2646 --- /dev/null +++ b/internal/logger/output/output.go @@ -0,0 +1,537 @@ +package output + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +const ( + defaultMaxSizeMB = 100 + bytesPerMB = 1024 * 1024 +) + +// Writer defines an interface for log output destinations. +type Writer interface { + io.Writer + // Sync ensures all data is written. + Sync() error + // Close releases any resources. + Close() error +} + +// FileWriter implements Writer for file-based logging. +type FileWriter struct { + mu sync.Mutex + file *os.File + path string + maxSize int64 + size int64 + compress bool +} + +// FileConfig holds configuration for file output. +type FileConfig struct { + // Path is the log file path + Path string + // MaxSize is the maximum size in bytes before rotation + MaxSize int64 + // Compress determines if rotated files should be compressed + Compress bool + // FileMode sets the permissions for new log files + FileMode os.FileMode +} + +// NewFileWriter creates a new file-based log writer. +func NewFileWriter(config FileConfig) (*FileWriter, error) { + if config.Path == "" { + return nil, ewrap.New("log file path is required") + } + + if config.MaxSize == 0 { + config.MaxSize = defaultMaxSizeMB * bytesPerMB // 100MB default + } + + if config.FileMode == 0 { + config.FileMode = 0o644 + } + + // Ensure directory exists + dir := filepath.Dir(config.Path) + //nolint:mnd + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, ewrap.Wrapf(err, "creating log directory"). 
+ WithMetadata("path", dir) + } + + // Open or create the log file + file, err := os.OpenFile(config.Path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, config.FileMode) + if err != nil { + return nil, ewrap.Wrapf(err, "opening log file"). + WithMetadata("path", config.Path) + } + + // Get initial file size + info, err := file.Stat() + if err != nil { + file.Close() + + return nil, ewrap.Wrapf(err, "getting file stats"). + WithMetadata("path", config.Path) + } + + return &FileWriter{ + file: file, + path: config.Path, + maxSize: config.MaxSize, + size: info.Size(), + compress: config.Compress, + }, nil +} + +// Write implements io.Writer. +func (w *FileWriter) Write(data []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + // Check if rotation is needed + if w.size+int64(len(data)) > w.maxSize { + if err := w.rotate(); err != nil { + return 0, ewrap.Wrapf(err, "rotating log file") + } + } + + bytesWritten, err := w.file.Write(data) + if err != nil { + return bytesWritten, ewrap.Wrap(err, "failed writing to log file") + } + + w.size += int64(bytesWritten) + + return bytesWritten, nil // Return nil error on success, don't wrap it +} + +// rotate moves the current log file to a timestamped backup +// and creates a new log file. +func (w *FileWriter) rotate() error { + // Close current file + if err := w.file.Close(); err != nil { + return ewrap.Wrapf(err, "closing current log file") + } + + // Generate backup filename with timestamp + timestamp := time.Now().Format("2006-01-02T15-04-05") + backupPath := filepath.Join( + filepath.Dir(w.path), + fmt.Sprintf("%s.%s", filepath.Base(w.path), timestamp), + ) + + // Rename current file to backup + if err := os.Rename(w.path, backupPath); err != nil { + return ewrap.Wrapf(err, "renaming log file"). + WithMetadata("from", w.path). 
+ WithMetadata("to", backupPath) + } + + // Compress backup file if enabled + if w.compress { + go w.compressFile(backupPath) // Run compression in background + } + + // Create new log file + //nolint:mnd + file, err := os.OpenFile(w.path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644) + if err != nil { + return ewrap.Wrapf(err, "creating new log file") + } + + w.file = file + w.size = 0 + + return nil +} + +func (w *FileWriter) Sync() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.file == nil { + return nil // Already closed, no error + } + + err := w.file.Sync() + if err != nil { + return ewrap.Wrapf(err, "syncing log file") + } + + return nil // Clean success +} + +func (w *FileWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.file == nil { + return nil // Already closed, no error + } + + // First sync any remaining data + if err := w.file.Sync(); err != nil { + return ewrap.Wrapf(err, "final sync before close") + } + + // Then close the file + err := w.file.Close() + if err != nil { + return ewrap.Wrapf(err, "closing log file") + } + + w.file = nil // Mark as closed + + return nil // Clean success +} + +// ConsoleWriter implements Writer for console output with color support. +type ConsoleWriter struct { + out io.Writer + mode ColorMode +} + +// ColorMode determines how colors are handled. +type ColorMode int + +const ( + // ColorModeAuto detects if the output supports colors. + ColorModeAuto ColorMode = iota + // ColorModeAlways forces color output. + ColorModeAlways + // ColorModeNever disables color output. + ColorModeNever +) + +// NewConsoleWriter creates a new ConsoleWriter that writes to the provided io.Writer with the specified ColorMode. +// If out is nil, it defaults to os.Stdout. +func NewConsoleWriter(out io.Writer, mode ColorMode) *ConsoleWriter { + if out == nil { + out = os.Stdout + } + + return &ConsoleWriter{ + out: out, + mode: mode, + } +} + +// Write writes the provided byte slice to the underlying output writer. 
+// It wraps any errors that occur during the write operation. +func (w *ConsoleWriter) Write(p []byte) (int, error) { + n, err := w.out.Write(p) + if err != nil { + return n, ewrap.Wrap(err, "failed writing to console output") + } + + return n, nil +} + +// Sync synchronizes the console output, skipping sync for stdout/stderr if it's not needed. +// It ignores "inappropriate ioctl for device" errors for terminal devices. +func (w *ConsoleWriter) Sync() error { + // For stdout/stderr, sync is not needed and can be safely skipped + if syncer, ok := w.out.(interface{ Sync() error }); ok { + if err := syncer.Sync(); err != nil { + // Ignore "inappropriate ioctl for device" errors for terminal devices + if strings.Contains(err.Error(), "inappropriate ioctl for device") { + return nil + } + + return ewrap.Wrapf(err, "syncing console output") + } + } + + return nil +} + +// Close closes the underlying output writer if it implements io.Closer. +// It wraps any errors that occur during the close operation. +func (w *ConsoleWriter) Close() error { + if closer, ok := w.out.(io.Closer); ok { + err := closer.Close() + if err != nil { + return ewrap.Wrapf(err, "closing console output") + } + } + + return nil // Return nil directly, don't wrap it +} + +// MultiWriter combines multiple writers into one. +type MultiWriter struct { + Writers []Writer + mu sync.RWMutex + // Add a debug name for each writer to help with diagnostics + writerNames map[Writer]string +} + +// NewMultiWriter creates a new writer that writes to all provided writers. +// It filters out nil writers and returns an error if no valid writers are provided. 
+func NewMultiWriter(writers ...Writer) (*MultiWriter, error) { + if len(writers) == 0 { + return nil, ewrap.New("at least one writer is required") + } + + validWriters := make([]Writer, 0, len(writers)) + writerNames := make(map[Writer]string) + + // Create descriptive names for each writer + for i, w := range writers { + if w != nil { + validWriters = append(validWriters, w) + // Store a descriptive name based on the writer type + writerNames[w] = fmt.Sprintf("%T[%d]", w, i) + } + } + + if len(validWriters) == 0 { + return nil, ewrap.New("no valid writers provided") + } + + return &MultiWriter{ + Writers: validWriters, + writerNames: writerNames, + }, nil +} + +// Write sends the output to all writers with detailed diagnostics. +func (mw *MultiWriter) Write(payload []byte) (int, error) { + mw.mu.RLock() + defer mw.mu.RUnlock() + + return mw.writeToWriters(payload) +} + +func (mw *MultiWriter) writeToWriters(payload []byte) (int, error) { + expectedBytes := len(payload) + results := mw.performWrites(payload, expectedBytes) + + successCount, failures := mw.processResults(results, expectedBytes) + + fmt.Fprintf(os.Stderr, "Total successes: %d/%d\n", successCount, len(results)) + + if len(failures) > 0 { + return expectedBytes, mw.createErrorReport(results, successCount, failures) + } + + return expectedBytes, nil +} + +func (mw *MultiWriter) performWrites(payload []byte, expectedBytes int) []WriteResult { + results := make([]WriteResult, 0, len(mw.Writers)) + + fmt.Fprintf(os.Stderr, "MultiWriter attempting to write %d bytes\n", expectedBytes) + + for _, writer := range mw.Writers { + if writer == nil { + continue + } + + n, err := writer.Write(payload) + result := WriteResult{ + Writer: writer, + Name: mw.writerNames[writer], + Bytes: n, + Err: err, + } + + fmt.Fprintf(os.Stderr, "Writer %s: wrote %d bytes, err: %v\n", + result.Name, result.Bytes, result.Err) + + results = append(results, result) + } + + return results +} + +func (mw *MultiWriter) 
processResults(results []WriteResult, expectedBytes int) (int, []string) { + successCount := 0 + + var failures []string + + for _, result := range results { + if result.Err == nil && result.Bytes == expectedBytes { + successCount++ + + fmt.Fprintf(os.Stderr, "Writer %s succeeded\n", result.Name) + } else { + reason := "incomplete write" + if result.Err != nil { + reason = result.Err.Error() + } + + failures = append(failures, fmt.Sprintf( + "%s: wrote %d/%d bytes (%s)", + result.Name, + result.Bytes, + expectedBytes, + reason, + )) + } + } + + return successCount, failures +} + +func (mw *MultiWriter) createErrorReport(results []WriteResult, successCount int, failures []string) error { + var diagMsg strings.Builder + + diagMsg.WriteString("Write operation status:\n") + + fmt.Fprintf(&diagMsg, " Total writers: %d\n", len(results)) + fmt.Fprintf(&diagMsg, " Successful writes: %d\n", successCount) + fmt.Fprintf(&diagMsg, " Failed writes: %d\n", len(failures)) + fmt.Fprintf(&diagMsg, " Failures:\n") + + for _, failure := range failures { + fmt.Fprintf(&diagMsg, " - %s\n", failure) + } + + return ewrap.New(diagMsg.String()) +} + +// Sync ensures all writers are synced with comprehensive diagnostics. +func (mw *MultiWriter) Sync() error { + mw.mu.RLock() + defer mw.mu.RUnlock() + + fmt.Fprintf(os.Stderr, "DEBUG: Starting sync operation for %d writers\n", len(mw.Writers)) + + var syncErrors []string + + successCount := 0 + + for i, writer := range mw.Writers { + if writer == nil { + fmt.Fprintf(os.Stderr, "DEBUG: Writer %d is nil, skipping\n", i) + + continue + } + + fmt.Fprintf(os.Stderr, "DEBUG: Syncing writer %d (%T)\n", i, writer) + err := writer.Sync() + + if err != nil { + msg := fmt.Sprintf("%T: %v", writer, err) + fmt.Fprintf(os.Stderr, "DEBUG: Sync failed: %s\n", msg) + syncErrors = append(syncErrors, msg) + } else { + fmt.Fprintf(os.Stderr, "DEBUG: Sync succeeded for writer %d\n", i) + + successCount++ + } + } + + fmt.Fprintf(os.Stderr, "DEBUG: Sync complete. 
Successes: %d, Failures: %d\n", + successCount, len(syncErrors)) + + if len(syncErrors) > 0 { + return ewrap.New("sync operation partially failed"). + WithMetadata("failed_syncs", syncErrors). + WithMetadata("successful_syncs", successCount). + WithMetadata("total_writers", len(mw.Writers)) + } + + return nil +} + +// Close closes all writers with detailed cleanup tracking. +func (mw *MultiWriter) Close() error { + mw.mu.Lock() + defer mw.mu.Unlock() + + fmt.Fprintf(os.Stderr, "DEBUG: Starting close operation for %d writers\n", len(mw.Writers)) + + var closeErrors []string + + successCount := 0 + + for i, writer := range mw.Writers { + if writer == nil { + fmt.Fprintf(os.Stderr, "DEBUG: Writer %d is nil, skipping\n", i) + + continue + } + + fmt.Fprintf(os.Stderr, "DEBUG: Closing writer %d (%T)\n", i, writer) + err := writer.Close() + + if err != nil { // Simplified error check + msg := fmt.Sprintf("%T: %v", writer, err) + fmt.Fprintf(os.Stderr, "DEBUG: Close failed: %s\n", msg) + closeErrors = append(closeErrors, msg) + } else { + fmt.Fprintf(os.Stderr, "DEBUG: Close succeeded for writer %d\n", i) + + successCount++ + } + } + + fmt.Fprintf(os.Stderr, "DEBUG: Close complete. Successes: %d, Failures: %d\n", + successCount, len(closeErrors)) + + // Clear writers slice + for i := range mw.Writers { + mw.Writers[i] = nil + } + + mw.Writers = nil + + if len(closeErrors) > 0 { + return ewrap.New("close operation partially failed"). + WithMetadata("failed_closes", closeErrors). + WithMetadata("successful_closes", successCount) + } + + return nil +} + +// AddWriter adds a new writer to the MultiWriter. +func (mw *MultiWriter) AddWriter(writer Writer) error { + if writer == nil { + return ewrap.New("cannot add nil writer") + } + + mw.mu.Lock() + defer mw.mu.Unlock() + + mw.Writers = append(mw.Writers, writer) + + return nil +} + +// RemoveWriter removes a writer from the MultiWriter. 
+func (mw *MultiWriter) RemoveWriter(writer Writer) { + if writer == nil { + return + } + + mw.mu.Lock() + defer mw.mu.Unlock() + + for i, existingWriter := range mw.Writers { + if existingWriter == writer { + // Remove the writer by replacing it with the last element + // and truncating the slice + lastIdx := len(mw.Writers) - 1 + mw.Writers[i] = mw.Writers[lastIdx] + mw.Writers[lastIdx] = nil // Clear the reference + mw.Writers = mw.Writers[:lastIdx] + + break + } + } +} diff --git a/internal/logger/output/types.go b/internal/logger/output/types.go new file mode 100644 index 0000000..539f2df --- /dev/null +++ b/internal/logger/output/types.go @@ -0,0 +1,10 @@ +package output + +import "io" + +type WriteResult struct { + Writer io.Writer + Name string + Bytes int + Err error +} diff --git a/internal/repository/pg/db.go b/internal/repository/pg/db.go new file mode 100644 index 0000000..2b2322f --- /dev/null +++ b/internal/repository/pg/db.go @@ -0,0 +1,266 @@ +package pg + +import ( + "context" + "strconv" + "strings" + "time" + + "github.com/hyp3rd/base/internal/config" + "github.com/hyp3rd/base/internal/logger" + "github.com/hyp3rd/ewrap/pkg/ewrap" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +// Manager is a struct that manages the connection to a PostgreSQL database. +// It holds a connection pool, the database configuration, and a logger. +type Manager struct { + pool *pgxpool.Pool + cfg *config.DBConfig + logger logger.Logger +} + +// New creates a new instance of the Manager struct, which manages the connection +// to a PostgreSQL database. It takes a DBConfig and a Logger as arguments, and +// initializes the cfg and logger fields of the Manager. +func New(cfg *config.DBConfig, logger logger.Logger) *Manager { + return &Manager{ + cfg: cfg, + logger: logger, + } +} + +// Connect establishes a connection to the PostgreSQL database using the configuration +// provided in the Manager. 
It attempts to connect with retries, and verifies the +// connection before returning. If the connection cannot be established after the +// configured number of attempts, an error is returned. +func (m *Manager) Connect(ctx context.Context) error { + var err error + + // Configure the connection pool + poolConfig, err := pgxpool.ParseConfig(m.cfg.DSN) + if err != nil { + return ewrap.Wrapf(err, "parsing database config") + } + + // Apply configuration + poolConfig.MaxConns = m.cfg.MaxOpenConns + poolConfig.MinConns = m.cfg.MaxIdleConns + poolConfig.MaxConnLifetime = m.cfg.ConnMaxLifetime + + // Attempt to connect with retries + for attempt := 1; attempt <= m.cfg.ConnAttempts; attempt++ { + // Create a context with timeout for this attempt + attemptCtx, cancel := context.WithTimeout(ctx, m.cfg.ConnTimeout) + + m.pool, err = pgxpool.NewWithConfig(attemptCtx, poolConfig) + + cancel() + + if err == nil { + break + } + + if attempt == m.cfg.ConnAttempts { + return ewrap.Wrapf(err, "failed to connect to database after %d attempts", attempt). + WithMetadata("dsn", maskDSN(m.cfg.DSN)) + } + + m.logger.Warnf("Database connection attempt %d/%d failed: %v", + attempt, m.cfg.ConnAttempts, err) + + select { + case <-ctx.Done(): + return ewrap.Wrap(ctx.Err(), "context cancelled during connection attempts") + case <-time.After(time.Second * time.Duration(attempt)): + // Exponential backoff + continue + } + } + + // Verify the connection + if err := m.Ping(ctx); err != nil { + return ewrap.Wrapf(err, "verifying database connection") + } + + return nil +} + +// Ping checks if the database connection is active by pinging the database. +// If the connection is not established or the ping fails, it returns an error. 
+func (m *Manager) Ping(ctx context.Context) error { + if m.pool == nil { + return ewrap.New("database not connected") + } + + // Create a context with timeout for this attempt + attemptCtx, cancel := context.WithTimeout(ctx, m.cfg.ConnTimeout) + defer cancel() + + err := m.pool.Ping(attemptCtx) + if err != nil { + return ewrap.Wrapf(err, "pinging database") + } + + return nil +} + +// Close closes the database connection. +func (m *Manager) Close() { + if m.pool != nil { + m.pool.Close() + } +} + +// GetPool returns the connection pool. +func (m *Manager) GetPool() *pgxpool.Pool { + return m.pool +} + +// Stats returns the current pool statistics. If the connection pool is not +// established, it returns nil. If the pool.Stat() method returns nil, it +// returns a new pgxpool.Stat instance. +func (m *Manager) Stats() *pgxpool.Stat { + if m.pool == nil { + return nil + } + + // Return the current pool statistics + if m.pool.Stat() == nil { + return &pgxpool.Stat{} + } + + return m.pool.Stat() +} + +// IsConnected checks if the database connection is active. It verifies the connection +// by calling the Ping method. If the connection is not established or the Ping +// fails, it returns false. +func (m *Manager) IsConnected(ctx context.Context) bool { + if m.pool == nil { + return false + } + + // Verify the connection + if err := m.Ping(ctx); err != nil { + m.logger.Warnf("Database connection failed: %v", err) + + return false + } + + return true +} + +// Transaction executes the provided function within a database transaction. If the +// function returns an error, the transaction is rolled back. Otherwise, the +// transaction is committed. +// +// The provided function is passed the current context and a pgx.Tx instance to +// execute database operations within the transaction. +// +// If the database connection is not established, an error is returned. 
+func (m *Manager) Transaction(ctx context.Context, fn func(context.Context, pgx.Tx) error) error { + if m.pool == nil { + return ewrap.New("database not connected") + } + + tx, err := m.pool.Begin(ctx) + if err != nil { + return ewrap.Wrapf(err, "beginning transaction") + } + + // Execute the provided function + if err := fn(ctx, tx); err != nil { + // Attempt to rollback on error + if rbErr := tx.Rollback(ctx); rbErr != nil { + return ewrap.New("transaction failed"). + WithMetadata("exec_error", err). + WithMetadata("rollback_error", rbErr) + } + + return ewrap.Wrapf(err, "executing transaction") + } + + // Commit the transaction + if err := tx.Commit(ctx); err != nil { + return ewrap.Wrapf(err, "committing transaction") + } + + return nil +} + +// maskDSN takes a database connection string (DSN) and returns a masked version +// of the DSN, hiding sensitive information like the password. +func maskDSN(dsn string) string { + if dsn == "" { + return "" + } + + config, err := pgx.ParseConfig(dsn) + if err != nil { + return "[INVALID_DSN]" + } + + masked := buildMaskedDSN(config) + + return masked +} + +func buildMaskedDSN(config *pgx.ConnConfig) string { + masked := "postgres://" + + if config.User != "" { + masked += config.User + } + + if config.Password != "" { + masked += ":********" + } + + if config.Host != "" { + masked += "@" + config.Host + if config.Port != 0 { + masked += ":" + strconv.Itoa(int(config.Port)) + } + } + + if config.Database != "" { + masked += "/" + config.Database + } + + masked += addRuntimeParams(config.RuntimeParams) + + return masked +} + +func addRuntimeParams(params map[string]string) string { + if len(params) == 0 { + return "" + } + + var paramStrings []string + + for key, value := range params { + if isSensitiveParam(key) { + paramStrings = append(paramStrings, key+"=[MASKED]") + } else { + paramStrings = append(paramStrings, key+"="+value) + } + } + + return "?" 
+ strings.Join(paramStrings, "&") +} + +// isSensitiveParam checks if a connection parameter is sensitive. +func isSensitiveParam(param string) bool { + sensitiveParams := map[string]bool{ + "password": true, + "sslkey": true, + "sslcert": true, + "sslrootcert": true, + "sslpassword": true, + } + + return sensitiveParams[param] +} diff --git a/internal/repository/pg/monitor.go b/internal/repository/pg/monitor.go new file mode 100644 index 0000000..ec9b126 --- /dev/null +++ b/internal/repository/pg/monitor.go @@ -0,0 +1,389 @@ +package pg + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/hyp3rd/base/internal/logger" + "github.com/jackc/pgx/v5/pgxpool" +) + +const ( + // MaxMetricsToStore is the maximum number of metrics to store in the database. + MaxMetricsToStore = 10000 // Keep last 10k metrics + // HealthStatusMaxErrors is the maximum number of errors to keep in the health status. + HealthStatusMaxErrors = 100 + // MonitorInterval is the interval at which the monitor will check the health of the database. + MonitorInterval = 10 * time.Second +) + +// PoolStats represents statistics about the connection pool. +// It includes information about active queries, queued queries, slow queries, +// failed queries, average query time, the number of prepared statements, +// and the last error that occurred. 
+type PoolStats struct { + *pgxpool.Stat + // Connection metrics + ActiveQueries int64 // Currently executing queries + QueuedQueries int64 // Queries waiting for execution + SlowQueries int64 // Queries exceeding threshold + FailedQueries int64 // Queries that resulted in errors + + // Connection timing + AcquireCount int64 // Total number of connection acquisitions + AcquireDuration time.Duration // Average time to acquire a connection + WaitingConnections int64 // Number of goroutines waiting for a connection + IdleConnections int64 // Current number of idle connections + + // Connection lifecycle + MaxLifetimeDropped int64 // Connections dropped due to max lifetime + MaxIdleTimeDropped int64 // Connections dropped due to idle timeout + ConnectionRefusals int64 // Connection requests that were refused + // PendingConnections represents connections that exist in the pool + // but are neither idle nor acquired. These may be connections + // in the process of being established or closed. + PendingConnections int64 + // Performance metrics + AverageQueryTime time.Duration // Average query execution time + PreparedStmtCount int // Number of prepared statements + + // Error tracking + LastError error // Last error that occurred + LastErrorTime time.Time // When the last error occurred + ErrorCount int64 // Total number of errors +} + +// PreparedStatement represents a prepared SQL statement in the database. +// It includes information about the statement, such as the query text, +// a unique statement ID, when the statement was created, when it was +// last used, how many times it has been used, the average execution +// time, and the total execution time. The struct is protected by a +// read-write mutex to allow concurrent access. 
+type PreparedStatement struct { + Query string + StatementID string + CreatedAt time.Time + LastUsed time.Time + UsageCount int64 + AverageExecTime time.Duration + TotalExecTime time.Duration + mu sync.RWMutex +} + +// HealthStatus represents the health status of a database connection. +// It includes information about the connection status, connection pool statistics, +// latency, last checked time, replication lag (for replicas), and recent errors. +// The MaxErrors field specifies the maximum number of errors to keep in the Errors slice. +type HealthStatus struct { + Connected bool + PoolStats *PoolStats + Latency time.Duration + LastChecked time.Time + ReplicationLag *time.Duration // Only for replicas + Errors []error // Recent errors + MaxErrors int // Maximum number of errors to keep +} + +// Monitor is a struct that manages the monitoring of a database connection pool. +// It includes information about the health status of the connection, prepared statements, +// slow query threshold, and metrics collected from the database. +type Monitor struct { + manager *Manager + healthStatus *HealthStatus + preparedStmts map[string]*PreparedStatement + slowQueryThreshold time.Duration + mu sync.RWMutex + stopChan chan struct{} + metrics []QueryMetric + maxMetrics int +} + +// QueryMetric represents a metric collected for a database query, including the +// query text, the duration of the query, the number of rows affected, the +// timestamp when the query was executed, and any error that occurred during +// the query execution. +type QueryMetric struct { + Query string + Duration time.Duration + RowsAffected int64 + Timestamp time.Time + Error error +} + +// NewMonitor creates a new Monitor instance with the given slow query threshold. +// The Monitor is responsible for managing the monitoring of a database connection pool, +// including collecting health status, prepared statements, and query metrics. 
+func (m *Manager) NewMonitor(slowQueryThreshold time.Duration) *Monitor { + return &Monitor{ + manager: m, + healthStatus: &HealthStatus{ + MaxErrors: HealthStatusMaxErrors, + PoolStats: &PoolStats{}, // Initialize PoolStats + }, + preparedStmts: make(map[string]*PreparedStatement), + slowQueryThreshold: slowQueryThreshold, + stopChan: make(chan struct{}), + maxMetrics: MaxMetricsToStore, + } +} + +// Start runs a background goroutine that periodically collects metrics for the +// database connection pool managed by the Monitor. It uses a ticker to trigger +// the collection of metrics at a fixed interval, and stops the ticker when the +// stopChan is closed or the context is canceled. +func (m *Monitor) Start(ctx context.Context) { + ticker := time.NewTicker(MonitorInterval) + + go func() { + for { + select { + case <-ticker.C: + m.collectMetrics(ctx) + case <-m.stopChan: + ticker.Stop() + + return + case <-ctx.Done(): + ticker.Stop() + + return + } + } + }() +} + +// Stop stops the background goroutine that periodically collects metrics for the database connection pool. +func (m *Monitor) Stop() { + close(m.stopChan) +} + +// collectMetrics gathers current pool statistics and health information. It collects +// the pool statistics using collectPoolStats, updates the health status by pinging +// the database, logs the pool statistics, and cleans up old prepared statements. +// This method is called periodically by the Start method to collect and maintain +// the monitoring data for the database connection pool. 
+func (m *Monitor) collectMetrics(ctx context.Context) { + m.mu.Lock() + defer m.mu.Unlock() + + // Collect pool statistics + stats := m.collectPoolStats() + if stats == nil { + return + } + + // Update health status + start := time.Now() + err := m.manager.Ping(ctx) + latency := time.Since(start) + + m.healthStatus.Connected = err == nil + m.healthStatus.Latency = latency + m.healthStatus.LastChecked = time.Now() + m.healthStatus.PoolStats = stats + + if err != nil { + stats.LastError = err + stats.LastErrorTime = time.Now() + atomic.AddInt64(&stats.ErrorCount, 1) + m.addError(err) + } + + // Log the statistics + m.logPoolStats() + + // Clean up old prepared statements + m.cleanupPreparedStatements() +} + +// updatePoolStats atomically updates the pool statistics. It takes a *PoolStats +// argument and updates the various statistics fields using atomic operations. +// This ensures the statistics are updated in a thread-safe manner. +func (m *Monitor) updatePoolStats(stats *PoolStats) { + if stats == nil { + return + } + + // Atomic updates for all counters + atomic.StoreInt64(&stats.ActiveQueries, int64(stats.Stat.AcquiredConns())) + atomic.StoreInt64(&stats.IdleConnections, int64(stats.Stat.IdleConns())) + atomic.StoreInt64(&stats.PendingConnections, int64(stats.Stat.TotalConns()-stats.Stat.IdleConns()-stats.Stat.AcquiredConns())) + + // Update acquisition metrics + atomic.StoreInt64(&stats.AcquireCount, stats.Stat.AcquireCount()) + + // Calculate average acquire duration if we have acquisitions + if stats.AcquireCount > 0 { + avgDuration := stats.Stat.AcquireDuration().Nanoseconds() / stats.AcquireCount + atomic.StoreInt64((*int64)(&stats.AcquireDuration), avgDuration) + } +} + +// collectPoolStats gathers comprehensive pool statistics for the database connection pool. +// It retrieves the current pool statistics from the manager, copies relevant values from the +// existing health status, and then updates the pool statistics using updatePoolStats. 
+// The resulting PoolStats struct is returned, or nil if the manager's Stats() method returns nil. +func (m *Monitor) collectPoolStats() *PoolStats { + poolStat := m.manager.Stats() + if poolStat == nil { + return nil + } + + stats := &PoolStats{ + Stat: poolStat, + // Copy existing atomic values + ActiveQueries: atomic.LoadInt64(&m.healthStatus.PoolStats.ActiveQueries), + SlowQueries: atomic.LoadInt64(&m.healthStatus.PoolStats.SlowQueries), + FailedQueries: atomic.LoadInt64(&m.healthStatus.PoolStats.FailedQueries), + ErrorCount: atomic.LoadInt64(&m.healthStatus.PoolStats.ErrorCount), + + // Copy non-atomic values under lock + LastError: m.healthStatus.PoolStats.LastError, + LastErrorTime: m.healthStatus.PoolStats.LastErrorTime, + PreparedStmtCount: len(m.preparedStmts), + } + + // Update the statistics + m.updatePoolStats(stats) + + return stats +} + +// logPoolStats outputs detailed pool statistics. It collects comprehensive pool statistics using collectPoolStats, +// and then logs the statistics using the logger. It also logs warnings for concerning metrics, such as waiting +// connections and connection refusals. 
+func (m *Monitor) logPoolStats() { + stats := m.collectPoolStats() + if stats == nil { + return + } + + // Create fields correctly + m.manager.logger.WithFields( + logger.Field{Key: "active_queries", Value: stats.ActiveQueries}, + logger.Field{Key: "idle_connections", Value: stats.IdleConnections}, + logger.Field{Key: "waiting_connections", Value: stats.WaitingConnections}, + logger.Field{Key: "acquire_count", Value: stats.AcquireCount}, + logger.Field{Key: "acquire_duration_ms", Value: stats.AcquireDuration.Milliseconds()}, + logger.Field{Key: "slow_queries", Value: stats.SlowQueries}, + logger.Field{Key: "failed_queries", Value: stats.FailedQueries}, + logger.Field{Key: "prepared_statements", Value: stats.PreparedStmtCount}, + logger.Field{Key: "error_count", Value: stats.ErrorCount}, + ).Info("Pool Statistics") + + // Log warnings for concerning metrics + if stats.WaitingConnections > 0 { + m.manager.logger.WithFields( + logger.Field{Key: "waiting_count", Value: stats.WaitingConnections}, + ).Warn("Connections waiting in pool") + } + + if stats.ConnectionRefusals > 0 { + m.manager.logger.WithFields( + logger.Field{Key: "refusal_count", Value: stats.ConnectionRefusals}, + ).Warn("Connection refusals detected") + } +} + +// TrackQuery records query execution metrics. It logs the query, duration, rows affected, and any errors that occurred during the query execution. It also tracks slow queries and failed queries in the health status. 
+func (m *Monitor) TrackQuery(query string, duration time.Duration, rowsAffected int64, err error) { + m.mu.Lock() + defer m.mu.Unlock() + + metric := QueryMetric{ + Query: query, + Duration: duration, + RowsAffected: rowsAffected, + Timestamp: time.Now(), + Error: err, + } + + // Update metrics + m.metrics = append(m.metrics, metric) + if len(m.metrics) > m.maxMetrics { + m.metrics = m.metrics[1:] + } + + // Track slow queries + if duration > m.slowQueryThreshold { + atomic.AddInt64(&m.healthStatus.PoolStats.SlowQueries, 1) + } + + if err != nil { + atomic.AddInt64(&m.healthStatus.PoolStats.FailedQueries, 1) + } +} + +// TrackPreparedStatement records metrics for a prepared SQL statement, including the usage count, last used time, total execution time, and average execution time. +// This function is used to track the usage and performance of prepared statements in the database connection pool. +func (m *Monitor) TrackPreparedStatement(query string, stmtID string, execTime time.Duration) { + m.mu.Lock() + defer m.mu.Unlock() + + stmt, exists := m.preparedStmts[query] + if !exists { + stmt = &PreparedStatement{ + Query: query, + StatementID: stmtID, + CreatedAt: time.Now(), + } + m.preparedStmts[query] = stmt + } + + stmt.mu.Lock() + stmt.UsageCount++ + stmt.LastUsed = time.Now() + stmt.TotalExecTime += execTime + stmt.AverageExecTime = stmt.TotalExecTime / time.Duration(stmt.UsageCount) + stmt.mu.Unlock() +} + +// cleanupPreparedStatements removes unused prepared statements. +func (m *Monitor) cleanupPreparedStatements() { + threshold := time.Now().Add(-1 * time.Hour) + + for query, stmt := range m.preparedStmts { + stmt.mu.RLock() + if stmt.LastUsed.Before(threshold) && stmt.UsageCount < 100 { + delete(m.preparedStmts, query) + } + stmt.mu.RUnlock() + } +} + +// addError adds an error to the health status. 
+func (m *Monitor) addError(err error) { + m.healthStatus.Errors = append(m.healthStatus.Errors, err) + if len(m.healthStatus.Errors) > m.healthStatus.MaxErrors { + m.healthStatus.Errors = m.healthStatus.Errors[1:] + } +} + +// GetHealthStatus returns a copy of the current health status of the database connection pool. +// The returned HealthStatus object is a snapshot of the current state and is safe to access +// without race conditions. +func (m *Monitor) GetHealthStatus() *HealthStatus { + m.mu.RLock() + defer m.mu.RUnlock() + + // Return a copy to prevent races + status := *m.healthStatus + + return &status +} + +// GetPoolMetrics returns a copy of the current query metrics for the database connection pool. +// The returned slice of QueryMetric objects is a snapshot of the current state and is safe to access +// without race conditions. +func (m *Monitor) GetPoolMetrics() []QueryMetric { + m.mu.RLock() + defer m.mu.RUnlock() + + // Return a copy of metrics + metrics := make([]QueryMetric, len(m.metrics)) + copy(metrics, m.metrics) + + return metrics +} diff --git a/internal/repository/pg/params.go b/internal/repository/pg/params.go new file mode 100644 index 0000000..26ba1a5 --- /dev/null +++ b/internal/repository/pg/params.go @@ -0,0 +1,35 @@ +package pg + +import "time" + +// ListParams represents the parameters for listing items in a repository. +// It includes filters for IP address, user agent, anomaly status, time range, +// pagination, and sorting. +type ListParams struct { + IP string + UserAgent string + HasAnomaly bool + TimeRange TimeRange + Pagination PaginationParams + SortParams SortParams +} + +// TimeRange represents a range of time, with a Start and End time. +type TimeRange struct { + Start time.Time + End time.Time +} + +// PaginationParams represents the parameters for pagination, including the +// limit of items to return and the offset to start from. 
+type PaginationParams struct { + Limit int + Offset int +} + +// SortParams represents the parameters for sorting a list of items, including +// the field to sort by and the sort direction (ASC or DESC). +type SortParams struct { + Field string + Direction string // ASC or DESC +} diff --git a/internal/secrets/encryption/encryption.go b/internal/secrets/encryption/encryption.go new file mode 100644 index 0000000..591362f --- /dev/null +++ b/internal/secrets/encryption/encryption.go @@ -0,0 +1,262 @@ +package encryption + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "strings" + "sync" + + "github.com/hyp3rd/ewrap/pkg/ewrap" + "golang.org/x/crypto/scrypt" +) + +const ( + // KeyLength is the length of the key used for encryption. + KeyLength = 32 + // ResourceCost is the cost of the scrypt key derivation function. + ResourceCost = 1 << 15 + // BlockSize is the block size of the cipher. + BlockSize = 8 +) + +// Metadata holds the parameters needed for decryption. +type Metadata struct { + Version int `json:"v"` // Version of the encryption format + Salt []byte `json:"s"` // Salt used for key derivation + Params KeyDerivationParams `json:"p"` // Key derivation parameters + Nonce []byte `json:"n"` // Nonce used for encryption + Ciphertext []byte `json:"c"` // The encrypted data +} + +// KeyDerivationParams defines the parameters for key derivation using scrypt. +type KeyDerivationParams struct { + // Salt []byte // Salt for key derivation + N int `json:"n"` // CPU/memory cost parameter (must be power of 2) + R int `json:"r"` // Block size parameter + P int `json:"p"` // Parallelization parameter + KeyLen int `json:"kl"` // Length of the derived key +} + +// DefaultParams returns secure default parameters for key derivation. 
+func DefaultParams() KeyDerivationParams { + return KeyDerivationParams{ + // Salt: make([]byte, KeyLength), // 32-byte salt + N: ResourceCost, // CPU/memory cost (32768) + R: BlockSize, // Block size + P: 1, // Parallelization + KeyLen: KeyLength, // 256-bit key + } +} + +// Cryptographer handles encryption and decryption of secrets. +type Cryptographer struct { + mu sync.RWMutex + params KeyDerivationParams + password []byte +} + +// New creates a new Cryptographer instance. +func New(password string) (*Cryptographer, error) { + cryptographer := &Cryptographer{ + params: DefaultParams(), + } + + cryptographer.password = []byte(password) + + // Generate a random salt if not provided + // if _, err := io.ReadFull(rand.Reader, cryptographer.params.Salt); err != nil { + // return nil, ewrap.Wrapf(err, "generating random salt") + // } + + // Initialize the cryptographer with the password + // if err := cryptographer.Initialize(password); err != nil { + // return nil, err + // } + + return cryptographer, nil +} + +// Initialize sets up the cryptographer with a password. +// func (c *Cryptographer) Initialize(password string) error { +// c.mu.Lock() +// defer c.mu.Unlock() + +// // Derive the encryption key from the password +// // key, err := c.deriveKey(password) +// // if err != nil { +// // return ewrap.Wrapf(err, "deriving encryption key") +// // } + +// // Create cipher block +// block, err := aes.NewCipher(key) +// if err != nil { +// return ewrap.Wrapf(err, "creating cipher block") +// } + +// // Create GCM mode +// gcm, err := cipher.NewGCM(block) +// if err != nil { +// return ewrap.Wrapf(err, "creating GCM mode") +// } + +// c.gcm = gcm +// c.masterKey = key +// c.initialized = true + +// return nil +// } + +// Encrypt encrypts a plaintext string and returns a formatted encrypted string. 
+func (c *Cryptographer) Encrypt(plaintext string) (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + // Generate a random salt + salt := make([]byte, KeyLength) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return "", ewrap.Wrapf(err, "generating salt") + } + + // Derive the key + key, err := scrypt.Key(c.password, salt, c.params.N, c.params.R, c.params.P, c.params.KeyLen) + if err != nil { + return "", ewrap.Wrapf(err, "deriving key") + } + + // Create cipher + block, err := aes.NewCipher(key) + if err != nil { + return "", ewrap.Wrapf(err, "creating cipher") + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", ewrap.Wrapf(err, "creating GCM") + } + + // Generate nonce + nonce := make([]byte, gcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", ewrap.Wrapf(err, "generating nonce") + } + + // Encrypt the data + ciphertext := gcm.Seal(nil, nonce, []byte(plaintext), nil) + + // Create metadata + metadata := Metadata{ + Version: 1, + Salt: salt, + Params: c.params, + Nonce: nonce, + Ciphertext: ciphertext, + } + + // Serialize metadata to JSON + metadataJSON, err := json.Marshal(metadata) + if err != nil { + return "", ewrap.Wrapf(err, "marshaling metadata") + } + + // Encode everything in base64 + encoded := base64.StdEncoding.EncodeToString(metadataJSON) + + return fmt.Sprintf("ENC[%s]", encoded), nil +} + +// Decrypt decrypts a formatted encrypted string using the provided key. 
+func (c *Cryptographer) Decrypt(encryptedData string) (string, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + // Remove the ENC[] wrapper + if !strings.HasPrefix(encryptedData, "ENC[") || !strings.HasSuffix(encryptedData, "]") { + return "", ewrap.New("invalid encryption format") + } + + encoded := encryptedData[4 : len(encryptedData)-1] + + // Decode the base64 data + metadataJSON, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + return "", ewrap.Wrapf(err, "decoding base64") + } + + // Unmarshal metadata + var metadata Metadata + if err := json.Unmarshal(metadataJSON, &metadata); err != nil { + return "", ewrap.Wrapf(err, "unmarshaling metadata") + } + + // Derive the key using the stored parameters + key, err := scrypt.Key( + c.password, + metadata.Salt, + metadata.Params.N, + metadata.Params.R, + metadata.Params.P, + metadata.Params.KeyLen, + ) + if err != nil { + return "", ewrap.Wrapf(err, "deriving key") + } + + // Create cipher + block, err := aes.NewCipher(key) + if err != nil { + return "", ewrap.Wrapf(err, "creating cipher") + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return "", ewrap.Wrapf(err, "creating GCM") + } + + // Decrypt the data + plaintext, err := gcm.Open(nil, metadata.Nonce, metadata.Ciphertext, nil) + if err != nil { + return "", ewrap.Wrapf(err, "decrypting data") + } + + return string(plaintext), nil +} + +// func (c *Cryptographer) deriveKey(password string) ([]byte, error) { +// bytes, err := scrypt.Key( +// []byte(password), +// c.params.Salt, +// c.params.N, +// c.params.R, +// c.params.P, +// c.params.KeyLen, +// ) +// if err != nil { +// return nil, ewrap.Wrapf(err, "error deriving key") +// } + +// return bytes, nil +// } + +// // RotateKey safely rotates the encryption key. 
+// func (c *Cryptographer) RotateKey(newPassword string) error { +// c.mu.Lock() +// defer c.mu.Unlock() + +// // Create a temporary cryptographer with the new key +// newCrypto, err := New(newPassword) +// if err != nil { +// return ewrap.Wrapf(err, "creating new cryptographer") +// } + +// // Update the current cryptographer with the new key +// c.gcm = newCrypto.gcm +// c.params = newCrypto.params +// c.masterKey = newCrypto.masterKey + +// return nil +// } diff --git a/internal/secrets/manager.go b/internal/secrets/manager.go new file mode 100644 index 0000000..bddd82c --- /dev/null +++ b/internal/secrets/manager.go @@ -0,0 +1,97 @@ +package secrets + +import ( + "context" + "sync" + + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// Manager is the main struct responsible for managing secrets in the application. +// It holds a reference to the secrets store and the provider that retrieves the secrets. +// The Manager is thread-safe and uses a read-write mutex to protect the secrets store. +type Manager struct { + Provider Provider + store *Store + mu sync.RWMutex +} + +// NewManager creates a new Manager instance with the provided Provider. +// The Manager is responsible for managing secrets in the application. +func NewManager(provider Provider) *Manager { + return &Manager{ + Provider: provider, + store: &Store{}, + } +} + +// Load loads the secrets from the provider and stores them in the Manager's secrets store. +// It first loads the database credentials, then the API keys, and finally validates the loaded secrets. +// If any error occurs during the loading process, the function will return the error. 
+func (m *Manager) Load(ctx context.Context) error { + m.mu.Lock() + defer m.mu.Unlock() + + // Load database credentials + if err := m.loadSecret(ctx, constants.DBUsername.String(), &m.store.DBCredentials.Username); err != nil { + return err + } + + if err := m.loadSecret(ctx, constants.DBPassword.String(), &m.store.DBCredentials.Password); err != nil { + return err + } + + // Load other secrets + // ... + + return m.validate() +} + +// GetStore returns a copy of the Manager's secrets store to prevent external modifications. +// The returned store is a deep copy, so changes to the copy will not affect the original store. +// The method acquires a read lock on the Manager's mutex to ensure thread-safety. +func (m *Manager) GetStore() *Store { + m.mu.RLock() + defer m.mu.RUnlock() + + // Return a copy to prevent external modifications + storeCopy := *m.store + + return &storeCopy +} + +// SetStore sets the Manager's secrets store to the provided value. +// The method acquires a write lock on the Manager's mutex to ensure thread-safety. +// It returns the updated store. +func (m *Manager) SetStore(secrets *Store) *Store { + m.mu.Lock() + defer m.mu.Unlock() + + // Set the store to the provided value + m.store = secrets + + return m.store +} + +func (m *Manager) loadSecret(ctx context.Context, key string, target *string) error { + value, err := m.Provider.GetSecret(ctx, key) + if err != nil { + return ewrap.Wrapf(err, "loading secret"). 
+ WithMetadata("key", key) + } + + *target = value + + return nil +} + +func (m *Manager) validate() error { + if m.store.DBCredentials.Username == "" || m.store.DBCredentials.Password == "" { + return ewrap.New("database credentials are required") + } + + // Validate other secrets here + + return nil +} diff --git a/internal/secrets/providers/aws/provider.go b/internal/secrets/providers/aws/provider.go new file mode 100644 index 0000000..e54baa9 --- /dev/null +++ b/internal/secrets/providers/aws/provider.go @@ -0,0 +1,174 @@ +package aws + +import ( + "context" + "encoding/json" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// Config holds the configuration for the AWS Secrets Manager provider. +type Config struct { + // Region is the AWS region where secrets are stored. + Region string + // BasePath is a prefix added to all secret names. + BasePath string + // MaxRetries is the number of retries for failed operations. + MaxRetries int + // Timeout for AWS operations. + Timeout time.Duration +} + +// Provider implements the secrets.Provider interface for AWS Secrets Manager. +type Provider struct { + client *secretsmanager.Client + config Config + mu sync.RWMutex + retryDelay time.Duration +} + +// New creates a new AWS Secrets Manager provider instance. 
+func New(ctx context.Context, cfg Config) (*Provider, error) { + if cfg.Timeout == 0 { + cfg.Timeout = constants.DefaultTimeout + } + + if cfg.MaxRetries == 0 { + cfg.MaxRetries = 3 + } + + // Load AWS configuration + awsCfg, err := config.LoadDefaultConfig(ctx, + config.WithRegion(cfg.Region), + config.WithRetryMaxAttempts(cfg.MaxRetries), + ) + if err != nil { + return nil, ewrap.Wrapf(err, "loading AWS config") + } + + return &Provider{ + client: secretsmanager.NewFromConfig(awsCfg), + config: cfg, + retryDelay: 1 * time.Second, + }, nil +} + +// GetSecret retrieves a secret from AWS Secrets Manager. +func (p *Provider) GetSecret(ctx context.Context, key string) (string, error) { + p.mu.RLock() + defer p.mu.RUnlock() + + secretName := p.buildSecretName(key) + + // Create a context with timeout + ctx, cancel := context.WithTimeout(ctx, p.config.Timeout) + defer cancel() + + input := &secretsmanager.GetSecretValueInput{ + SecretId: &secretName, + } + + // Get the secret value + result, err := p.client.GetSecretValue(ctx, input) + if err != nil { + return "", ewrap.Wrapf(err, "retrieving secret"). + WithMetadata("key", key) + } + + // Parse the secret value + return p.parseSecretValue(result.SecretString, key) +} + +// SetSecret stores a secret in AWS Secrets Manager. +func (p *Provider) SetSecret(ctx context.Context, key, value string) error { + p.mu.Lock() + defer p.mu.Unlock() + + secretName := p.buildSecretName(key) + + // Create a context with timeout + ctx, cancel := context.WithTimeout(ctx, p.config.Timeout) + defer cancel() + + // Create the secret value structure + secretValue := map[string]string{ + "value": value, + } + + // Convert to JSON + secretString, err := json.Marshal(secretValue) + if err != nil { + return ewrap.Wrapf(err, "marshaling secret value"). 
+ WithMetadata("key", key) + } + + // Check if the secret already exists + _, err = p.client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{ + SecretId: &secretName, + }) + + if err == nil { + // Update existing secret + input := &secretsmanager.PutSecretValueInput{ + SecretId: &secretName, + SecretString: aws.String(string(secretString)), + } + + _, err = p.client.PutSecretValue(ctx, input) + if err != nil { + return ewrap.Wrapf(err, "updating secret"). + WithMetadata("key", key) + } + } else { + // Create new secret + input := &secretsmanager.CreateSecretInput{ + Name: &secretName, + SecretString: aws.String(string(secretString)), + } + + _, err = p.client.CreateSecret(ctx, input) + if err != nil { + return ewrap.Wrapf(err, "creating secret"). + WithMetadata("key", key) + } + } + + return nil +} + +// buildSecretName constructs the full name for a secret in AWS Secrets Manager. +func (p *Provider) buildSecretName(key string) string { + if p.config.BasePath == "" { + return key + } + + return p.config.BasePath + "/" + key +} + +// parseSecretValue extracts the value from a JSON-encoded secret. +func (p *Provider) parseSecretValue(secretString *string, key string) (string, error) { + if secretString == nil { + return "", ewrap.New("empty secret value"). + WithMetadata("key", key) + } + + var secretData map[string]string + if err := json.Unmarshal([]byte(*secretString), &secretData); err != nil { + return "", ewrap.Wrapf(err, "parsing secret value"). + WithMetadata("key", key) + } + + value, ok := secretData["value"] + if !ok { + return "", ewrap.New("invalid secret format"). 
+ WithMetadata("key", key) + } + + return value, nil +} diff --git a/internal/secrets/providers/azure/provider.go b/internal/secrets/providers/azure/provider.go new file mode 100644 index 0000000..59e0cf0 --- /dev/null +++ b/internal/secrets/providers/azure/provider.go @@ -0,0 +1,214 @@ +package azure + +import ( + "context" + "fmt" + "path" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + + "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets" + "github.com/hyp3rd/base/internal/constants" + "github.com/hyp3rd/ewrap/pkg/ewrap" +) + +// Config holds the configuration for the Azure Key Vault provider. +type Config struct { + // VaultName is the name of the Azure Key Vault. + VaultName string + // TenantID is the Azure AD tenant ID. + TenantID string + // ClientID is the Azure AD application (client) ID. + ClientID string + // ClientSecret is the Azure AD application client secret. + ClientSecret string + // UseManagedIdentity indicates whether to use Azure Managed Identity. + UseManagedIdentity bool + // Timeout for Azure operations. + Timeout time.Duration + // MaxRetries is the number of retries for failed operations. + MaxRetries int + // Tags to apply to secrets (key-value pairs). + Tags map[string]*string +} + +// Provider implements the secrets.Provider interface for Azure Key Vault. +type Provider struct { + client *azsecrets.Client + config Config + mu sync.RWMutex + retryDelay time.Duration +} + +// New creates a new Azure Key Vault provider instance. 
+func New(_ context.Context, cfg Config) (*Provider, error) { + if cfg.Timeout == 0 { + cfg.Timeout = constants.DefaultTimeout + } + + if cfg.MaxRetries == 0 { + cfg.MaxRetries = 3 + } + + var ( + cred azcore.TokenCredential + err error + ) + + if cfg.UseManagedIdentity { + // Use managed identity authentication + cred, err = azidentity.NewDefaultAzureCredential(nil) + } else { + // Use service principal authentication + cred, err = azidentity.NewClientSecretCredential( + cfg.TenantID, + cfg.ClientID, + cfg.ClientSecret, + nil, + ) + } + + if err != nil { + return nil, ewrap.Wrapf(err, "creating Azure credentials") + } + + // Create Key Vault client + vaultURL := fmt.Sprintf("https://%s.vault.azure.net/", cfg.VaultName) + + client, err := azsecrets.NewClient(vaultURL, cred, nil) + if err != nil { + return nil, ewrap.Wrapf(err, "creating Key Vault client") + } + + return &Provider{ + client: client, + config: cfg, + retryDelay: 1 * time.Second, + }, nil +} + +// GetSecret retrieves a secret from Azure Key Vault. +func (p *Provider) GetSecret(ctx context.Context, key string) (string, error) { + p.mu.RLock() + defer p.mu.RUnlock() + + ctx, cancel := context.WithTimeout(ctx, p.config.Timeout) + defer cancel() + + var ( + resp azsecrets.GetSecretResponse + err error + ) + + // Implement retry logic with exponential backoff + for attempt := 0; attempt <= p.config.MaxRetries; attempt++ { + resp, err = p.client.GetSecret(ctx, key, "", nil) + if err == nil { + return *resp.Value, nil + } + + if attempt == p.config.MaxRetries { + return "", ewrap.Wrapf(err, "retrieving secret"). + WithMetadata("key", key). 
+ WithMetadata("attempt", attempt+1) + } + + time.Sleep(p.retryDelay * time.Duration(1< 0 { + vaultConfig.Timeout = cfg.Timeout + } + + // Create Vault client + client, err := api.NewClient(vaultConfig) + if err != nil { + return nil, ewrap.Wrapf(err, "creating Vault client") + } + + // Set auth token + client.SetToken(cfg.Token) + + // Set namespace if provided (Vault Enterprise feature) + if cfg.Namespace != "" { + client.SetNamespace(cfg.Namespace) + } + + return &Provider{ + client: client, + config: cfg, + retryDelay: 1 * time.Second, + }, nil +} + +// GetSecret retrieves a secret from Vault with retry logic. +func (p *Provider) GetSecret(ctx context.Context, key string) (string, error) { + p.mu.RLock() + defer p.mu.RUnlock() + + var ( + secret *api.KVSecret + err error + ) + + // Build the full path for the secret + secretPath := p.buildSecretPath(key) + + // Implement retry logic with exponential backoff + for attempt := 0; attempt <= p.config.MaxRetries; attempt++ { + select { + case <-ctx.Done(): + return "", ewrap.Wrap(ctx.Err(), "context canceled") + default: + // Read the secret from Vault + secret, err = p.client.KVv2(p.config.MountPath).Get(ctx, secretPath) + if err == nil && secret != nil { + return p.extractSecretValue(secret, key) + } + + // Check if we should retry + if attempt == p.config.MaxRetries { + return "", ewrap.Wrapf(err, "failed to retrieve secret after %d attempts", attempt+1). + WithMetadata("path", secretPath) + } + + // Wait before retrying with exponential backoff + time.Sleep(p.retryDelay * time.Duration(1<