diff --git a/README.md b/README.md index 7736138..6faa0f5 100644 --- a/README.md +++ b/README.md @@ -2,17 +2,110 @@ [![Go](https://github.com/Morebec/specter/actions/workflows/go.yml/badge.svg)](https://github.com/Morebec/specter/actions/workflows/go.yml) -Specter is a development toolkit in Go that allows you to develop configuration file processors based on -HashiCorp Configuration Language (HCL). With Specter, you can define your own Domain-Specific Language (DSL) -using HCL and create a processing pipeline to validate, lint, resolve dependencies, and generate code or artifact -files from these DSL configuration files. +Specter is a Go library designed to help developers easily build declarative DSLs (Domain-Specific Languages) and +process them through an extensible pipeline. -## Features +It is currently used at [Morébec](https://morebec.com) for generating microservice APIs, code and documentation, +managing configurations, automating deployments, and many other fun things. -- Develop your own DSL using HCL -- Validate and lint configuration files -- Resolve dependencies between configuration files -- Generate code or artifact files from configuration files +Specter provides a simple yet powerful framework to simplify these workflows. + +The library also comes with batteries included for common tasks such as dependency resolution, +linting, HCL configuration loading, and more. + +## Key Use Cases + +At [Morébec](https://morebec.com), Specter is primarily used to create high-level, syntax-consistent DSLs for tools +like OpenAPI, Docker/Docker Compose, and Terraform. + +Here are some of the key use cases Specter powers for us internally: + +- **Code Generation:** We generate entire code bases in PHP and Go leveraging DDD/CQRS/ES in a low-code manner, letting us focus on business logic and +reduce plumbing work. +- **Enforce Coding Standards:** We ensure consistency and improve development speed by automating code quality checks and + standardization. +- **Configuration Management:** We manage environment-specific configuration files, such as Docker or + Kubernetes manifests, using declarative Units. +- **CI/CD Automation:** We automate the generation of CI/CD pipeline definitions (Jenkins, GitHub Actions, etc.) + by processing high-level declarative Units. +- **Infrastructure as Code:** We describe infrastructure components declaratively and generate Terraform, + scripts, or other IaC artifacts. + + +## How Specter Works +Specter is built around a simple *pipeline* architecture. The core of the pipeline is designed to process +*Units*, declarative components that represent different aspects or concepts, and to produce various types +of outputs from them called *artifacts*. + +In short, Specter loads Units, processes them, and outputs the corresponding artifacts. + +For example, in the case of our Go code generator, we first define Microservices with their Commands, Events +and Queries in specification files that are then processed by Specter and transformed into their +corresponding Go implementation along with a changelog, markdown documentation, and an OpenAPI specification. + +In this example, the Microservice/Command/Event/Query definition files are the "Units", while the +generated code, markdown documentation, changelog, and OpenAPI specification are the "artifacts". + +Units are anything that needs transforming, and artifacts are anything these units can be transformed into.
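In code, assembling and running such a pipeline looks roughly like the following sketch. This is a minimal example based on the builder API introduced later in this changeset; the processor name, the `./specs` location, and the exact `FileArtifact` fields used here are illustrative assumptions rather than a prescribed setup.

```go
package main

import (
	"context"
	"fmt"

	"github.com/morebec/specter/pkg/specter"
	"github.com/morebec/specter/pkg/specterutils"
)

func main() {
	// Illustrative processor: emit one text file artifact per loaded Unit.
	generateFiles := specter.NewUnitProcessorFunc("generate_files", func(ctx specter.UnitProcessingContext) ([]specter.Artifact, error) {
		var artifacts []specter.Artifact
		for i := range ctx.Units {
			artifacts = append(artifacts, &specter.FileArtifact{
				Path:      fmt.Sprintf("out/unit_%d.txt", i),
				Data:      []byte("generated from a unit"),
				WriteMode: specter.RecreateMode,
			})
		}
		return artifacts, nil
	})

	pipeline := specter.NewPipeline().
		WithSourceLoaders(specter.NewLocalFileSourceLoader()).
		WithUnitLoaders(&specterutils.HCLGenericUnitLoader{}).
		WithUnitProcessors(generateFiles).
		WithArtifactProcessors(specter.FileArtifactProcessor{FileSystem: specter.LocalFileSystem{}}).
		WithJSONArtifactRegistry(specter.DefaultJSONArtifactRegistryFileName, specter.LocalFileSystem{}).
		Build()

	// Run the whole pipeline; the StopAfter* run modes can be used to stop at an earlier stage instead.
	result, err := pipeline.Run(context.Background(), []string{"./specs"}, specter.RunThrough)
	if err != nil {
		panic(err)
	}

	fmt.Printf("loaded %d units and produced %d artifacts in %s\n",
		len(result.Units), len(result.Artifacts), result.ExecutionTime())
}
```

Every piece of this sketch is swappable: source loaders, unit loaders, unit processors, and artifact processors are all plain interfaces, so custom implementations can be plugged in through the corresponding `With...` builder methods.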
+ +To illustrate, here's an example of a Unit File that could describe a Docker container to be deployed on a +given host using HCL syntax: + +```hcl +service "web" { + image = "our-app:latest" + ports = ["8080:80"] + volumes = [ + { + type = "bind" + source = "./html" + target = "/usr/share/nginx/html" + } + ] + deploymentHost = "primary-us-east-dev" +} +``` + +### Pipeline Stages +The pipeline consists of several stages, each responsible for a specific task in the workflow. +Here's an overview of the stages and the concepts they introduce: + +### 1. Source Loading +The first step is to acquire the sources that contain Units. Depending on the use case, these could come from files, HTTP resources, +or even database rows. These different locations are known in Specter as Unit Sources. + +The Source Loading stage loads these sources so that their content can be fetched +and read. + +- Inputs: Source locations +- Outputs: (Loaded) Sources + +### 2. Unit Loading +Units are read and materialized into in-memory data structures. This stage converts raw source data into +usable Units that can be processed according to your specific needs. + +- Inputs: Sources +- Outputs: (Loaded) Units + +### 3. Unit Processing +Once the Units are loaded, Specter applies processors, which are the core services responsible for generating artifacts +from these units. These processors can do things like validate the Units, resolve dependencies, or convert them +into different representations. You can easily add custom processors to extend Specter's behavior. + +The artifacts are in-memory representations of the actual desired outputs. For instance, the FileArtifact represents +a file to be written. + +- Inputs: Units +- Outputs: Artifacts + +### 4. Artifact Processing +The final stage of the pipeline processes the artifacts generated during the previous step. +The processing of these artifacts can vary greatly based on the types of artifacts at play. +An artifact could be anything: a file, an API call, a database insertion or update query, +or a command or program to be executed. + +- Inputs: Artifacts +- Outputs: Final outputs (files, API calls, etc.) ## Getting Started diff --git a/pkg/specter/artifactproc.go b/pkg/specter/artifactproc.go index 014e593..94405e7 100644 --- a/pkg/specter/artifactproc.go +++ b/pkg/specter/artifactproc.go @@ -16,15 +16,17 @@ package specter import ( "context" - "encoding/json" - "github.com/morebec/go-errors/errors" - "io/fs" - "os" - "slices" - "sync" - "time" ) +// ArtifactProcessor is a service responsible for processing the artifacts produced by UnitProcessors. +type ArtifactProcessor interface { + // Process performs the processing of artifacts generated by UnitProcessors. + Process(ctx ArtifactProcessingContext) error + + // Name returns the name of this processor. + Name() string +} + // ArtifactRegistry provides an interface for managing a registry of artifacts. This // registry tracks artifacts generated during processing runs, enabling clean-up // in subsequent runs to avoid residual artifacts and maintain a clean slate. @@ -97,199 +99,18 @@ type ArtifactProcessingContext struct { context.Context Units UnitGroup Artifacts []Artifact - Logger Logger ArtifactRegistry ProcessorArtifactRegistry processorName string } -var _ ArtifactRegistry = (*InMemoryArtifactRegistry)(nil) - -// InMemoryArtifactRegistry maintains a registry in memory. -// It can be useful for tests.
-type InMemoryArtifactRegistry struct { - EntriesMap map[string][]ArtifactRegistryEntry - mu sync.RWMutex // Mutex to protect concurrent access -} - -func (r *InMemoryArtifactRegistry) Add(processorName string, e ArtifactRegistryEntry) error { - if processorName == "" { - return errors.NewWithMessage(errors.InternalErrorCode, "processor name is required") - } - if e.ArtifactID == "" { - return errors.NewWithMessage(errors.InternalErrorCode, "artifact id is required") - } - - r.mu.Lock() - defer r.mu.Unlock() - - if r.EntriesMap == nil { - r.EntriesMap = map[string][]ArtifactRegistryEntry{} - } - - if _, ok := r.EntriesMap[processorName]; !ok { - r.EntriesMap[processorName] = make([]ArtifactRegistryEntry, 0) - } - - for i, entry := range r.EntriesMap[processorName] { - if entry.ArtifactID == e.ArtifactID { - r.EntriesMap[processorName] = slices.Delete(r.EntriesMap[processorName], i, i+1) - } - } - - r.EntriesMap[processorName] = append(r.EntriesMap[processorName], e) - - return nil -} - -func (r *InMemoryArtifactRegistry) Remove(processorName string, artifactID ArtifactID) error { - r.mu.Lock() - defer r.mu.Unlock() - - if processorName == "" { - return errors.NewWithMessage(errors.InternalErrorCode, "processor name is required") - } - if artifactID == "" { - return errors.NewWithMessage(errors.InternalErrorCode, "artifact id is required") - } - - if _, ok := r.EntriesMap[processorName]; !ok { - return nil - } - - var artifacts []ArtifactRegistryEntry - for _, entry := range r.EntriesMap[processorName] { - if entry.ArtifactID != artifactID { - artifacts = append(artifacts, entry) - } - } - - r.EntriesMap[processorName] = artifacts - - return nil -} - -func (r *InMemoryArtifactRegistry) FindByID(processorName string, artifactID ArtifactID) (entry ArtifactRegistryEntry, found bool, err error) { - all, _ := r.FindAll(processorName) - - for _, e := range all { - if e.ArtifactID == artifactID { - return e, true, nil - } - } - - return ArtifactRegistryEntry{}, false, nil -} - -func (r *InMemoryArtifactRegistry) FindAll(processorName string) ([]ArtifactRegistryEntry, error) { - if r.EntriesMap == nil { - return nil, nil - } - - values, ok := r.EntriesMap[processorName] - if !ok { - return nil, nil - } - - return values, nil -} - -func (r *InMemoryArtifactRegistry) Load() error { return nil } - -func (r *InMemoryArtifactRegistry) Save() error { return nil } - -const DefaultJSONArtifactRegistryFileName = ".specter.json" - -type JSONArtifactRegistryRepresentation struct { - GeneratedAt time.Time `json:"generatedAt"` - EntriesMap map[string][]JSONArtifactRegistryEntry `json:"entries"` +type ArtifactProcessorFunc struct { + name string + processFunc func(ctx ArtifactProcessingContext) error } -type JSONArtifactRegistryEntry struct { - ArtifactID string `json:"artifactId"` - Metadata map[string]any `json:"metadata"` +func (a ArtifactProcessorFunc) Process(ctx ArtifactProcessingContext) error { + return a.processFunc(ctx) } -var _ ArtifactRegistry = (*JSONArtifactRegistry)(nil) - -// JSONArtifactRegistry implementation of a ArtifactRegistry that is saved as a JSON file. 
-type JSONArtifactRegistry struct { - *InMemoryArtifactRegistry - FileSystem FileSystem - FilePath string - TimeProvider TimeProvider - - mu sync.RWMutex // Mutex to protect concurrent access -} - -func (r *JSONArtifactRegistry) Load() error { - r.mu.Lock() - defer r.mu.Unlock() - - bytes, err := r.FileSystem.ReadFile(r.FilePath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return errors.WrapWithMessage(err, errors.InternalErrorCode, "failed loading artifact file registry") - } - - // empty file is okay - if len(bytes) == 0 { - return nil - } - - repr := &JSONArtifactRegistryRepresentation{} - - if err := json.Unmarshal(bytes, repr); err != nil { - return errors.WrapWithMessage(err, errors.InternalErrorCode, "failed loading artifact file registry") - } - - for processorName, entries := range repr.EntriesMap { - for _, entry := range entries { - if err := r.InMemoryArtifactRegistry.Add(processorName, ArtifactRegistryEntry{ - ArtifactID: ArtifactID(entry.ArtifactID), - Metadata: entry.Metadata, - }); err != nil { - return err - } - } - } - - return nil -} - -func (r *JSONArtifactRegistry) Save() error { - r.mu.RLock() - defer r.mu.RUnlock() - - repr := JSONArtifactRegistryRepresentation{ - GeneratedAt: r.TimeProvider(), - EntriesMap: make(map[string][]JSONArtifactRegistryEntry, len(r.InMemoryArtifactRegistry.EntriesMap)), - } - - // Add entries to representation - for processorName, entries := range r.InMemoryArtifactRegistry.EntriesMap { - repr.EntriesMap[processorName] = nil - for _, entry := range entries { - repr.EntriesMap[processorName] = append(repr.EntriesMap[processorName], JSONArtifactRegistryEntry{ - ArtifactID: string(entry.ArtifactID), - Metadata: entry.Metadata, - }) - } - } - - // Set generation date - repr.GeneratedAt = r.TimeProvider() - - // Generate a JSON file containing all artifact files for clean up later on - js, err := json.MarshalIndent(repr, "", " ") - if err != nil { - return errors.Wrap(err, "failed generating artifact file registry") - } - if err := r.FileSystem.WriteFile(r.FilePath, js, fs.ModePerm); err != nil { - return errors.Wrap(err, "failed generating artifact file registry") - } - - return nil -} +func (a ArtifactProcessorFunc) Name() string { return a.name } diff --git a/pkg/specter/artifactproc_fileartifact.go b/pkg/specter/artifactproc_fileartifact.go index c253e78..7f9d496 100644 --- a/pkg/specter/artifactproc_fileartifact.go +++ b/pkg/specter/artifactproc_fileartifact.go @@ -35,9 +35,7 @@ const ( WriteOnceMode WriteMode = "WRITE_ONCE" ) -const DefaultWriteMode WriteMode = WriteOnceMode - -var _ Artifact = (*FileArtifact)(nil) +const DefaultWriteMode = WriteOnceMode // FileArtifact is a data structure that can be used by a UnitProcessor to generate file artifacts // that can be written by the FileArtifactProcessor. @@ -84,19 +82,14 @@ func (p FileArtifactProcessor) Process(ctx ArtifactProcessingContext) error { } if err := p.cleanRegistry(ctx); err != nil { - ctx.Logger.Error(fmt.Sprintf("failed cleaning artifact registry: %s", err.Error())) return errors.Wrap(err, FileArtifactProcessorCleanUpFailedErrorCode) } // Write files concurrently to speed up process. 
- ctx.Logger.Info("Writing file artifacts ...") if err := p.processArtifacts(ctx, files); err != nil { - ctx.Logger.Error(fmt.Sprintf("failed processing artifacts: %s", err.Error())) return errors.Wrap(err, FileArtifactProcessorProcessingFailedErrorCode) } - ctx.Logger.Success("Files artifacts written successfully.") - return nil } @@ -119,7 +112,6 @@ func (p FileArtifactProcessor) processArtifacts(ctx ArtifactProcessingContext, f // We delegate the responsibility of the caller to have provided the directories in the right order. for _, d := range directories { if err := p.processFileArtifact(ctx, d); err != nil { - ctx.Logger.Error(fmt.Sprintf("failed writing directory artifact at %q: %s", d.Path, err)) errs = errs.Append(err) } } @@ -132,7 +124,6 @@ func (p FileArtifactProcessor) processArtifacts(ctx ArtifactProcessingContext, f go func(file *FileArtifact) { defer wg.Done() if err := p.processFileArtifact(ctx, file); err != nil { - ctx.Logger.Error(fmt.Sprintf("failed writing artifact file at %q: %s", file.Path, err)) mu.Lock() errs = errs.Append(err) mu.Unlock() @@ -160,7 +151,6 @@ func (p FileArtifactProcessor) findFileArtifactsFromContext(ctx ArtifactProcessi func (p FileArtifactProcessor) processFileArtifact(ctx ArtifactProcessingContext, fa *FileArtifact) error { if fa.WriteMode == "" { - ctx.Logger.Trace(fmt.Sprintf("File artifact %q does not have a write mode, defaulting to %q", fa.ID(), DefaultWriteMode)) fa.WriteMode = DefaultWriteMode } @@ -191,19 +181,15 @@ func (p FileArtifactProcessor) processFileArtifact(ctx ArtifactProcessingContext // At this point if the file still already exists, this means that the clean step has not // been executed properly. if fileExists { - ctx.Logger.Trace(fmt.Sprintf("the cleanup process failed without being caught: file for %q still exists", fa.ID())) return errors.NewWithMessage(FileArtifactProcessorCleanUpFailedErrorCode, "the cleanup process failed without being caught") } - if fa.IsDir() { - ctx.Logger.Info(fmt.Sprintf("Creating directory %q ...", filePath)) - ctx.Logger.Trace(fmt.Sprintf("making directory %q for %q ...", filePath, fa.ID())) + switch { + case fa.IsDir(): if err := p.FileSystem.Mkdir(filePath, fs.ModePerm); err != nil { return err } - } else { - ctx.Logger.Info(fmt.Sprintf("Writing file %q ...", filePath)) - ctx.Logger.Trace(fmt.Sprintf("creating file %q for %q ...", filePath, fa.ID())) + default: if err := p.FileSystem.WriteFile(filePath, fa.Data, fs.ModePerm); err != nil { return err } @@ -223,7 +209,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro var wg sync.WaitGroup var errs errors.Group - ctx.Logger.Info("Cleaning file artifacts ...") entries, err := ctx.ArtifactRegistry.FindAll() if err != nil { return err @@ -231,7 +216,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro var validArtifacts []FileArtifact - ctx.Logger.Trace("Validating file artifact registry entries before cleanup ...") // Validate registry entries before cleanup to reduce the chances of a partial cleanup for _, entry := range entries { if entry.Metadata == nil { @@ -244,7 +228,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro path, ok := entry.Metadata["path"].(string) if !ok || path == "" { // should never happen because of pre validation - ctx.Logger.Trace(fmt.Sprintf("invalid registry entry %q: no path", entry.ArtifactID)) return errors.NewWithMessage( FileArtifactProcessorCleanUpFailedErrorCode, fmt.Sprintf("invalid registry entry %q: no 
path", entry.ArtifactID), @@ -261,7 +244,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro writeMode := WriteMode(writeModeStr) if writeMode != RecreateMode { - ctx.Logger.Trace(fmt.Sprintf("registry entry %q has write mode %q, it will not be cleaned up", entry.ArtifactID, writeModeStr)) continue } @@ -272,7 +254,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro } // Proceed with cleanup - ctx.Logger.Trace("Cleaning up files of registry entries ...") for _, artifact := range validArtifacts { wg.Add(1) go func(artifact FileArtifact) { @@ -290,7 +271,6 @@ func (p FileArtifactProcessor) cleanRegistry(ctx ArtifactProcessingContext) erro } func (p FileArtifactProcessor) cleanArtifact(ctx ArtifactProcessingContext, artifact FileArtifact) error { - ctx.Logger.Info(fmt.Sprintf("cleaning file artifact %q ...", artifact.ID())) // In the case of errors here, we'd like roll back and put the registry in the same state as it was // to avoid having orphaned files on the file system. diff --git a/pkg/specter/artifactproc_fileartifact_test.go b/pkg/specter/artifactproc_fileartifact_test.go index 13ffd59..4c9101b 100644 --- a/pkg/specter/artifactproc_fileartifact_test.go +++ b/pkg/specter/artifactproc_fileartifact_test.go @@ -401,7 +401,6 @@ func TestWriteFileArtifactProcessor_Process(t *testing.T) { ctx := specter.ArtifactProcessingContext{ Context: parentCtx, Artifacts: tt.when.artifacts, - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{}), ArtifactRegistry: specter.NewProcessorArtifactRegistry(processor.Name(), registry), } err := processor.Process(ctx) diff --git a/pkg/specter/artifactproc_registry_inmemory.go b/pkg/specter/artifactproc_registry_inmemory.go new file mode 100644 index 0000000..59645be --- /dev/null +++ b/pkg/specter/artifactproc_registry_inmemory.go @@ -0,0 +1,114 @@ +// Copyright 2024 Morébec +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specter + +import ( + "github.com/morebec/go-errors/errors" + "slices" + "sync" +) + +// InMemoryArtifactRegistry maintains a registry in memory. +// It can be useful for tests. 
+type InMemoryArtifactRegistry struct { + EntriesMap map[string][]ArtifactRegistryEntry + mu sync.RWMutex // Mutex to protect concurrent access +} + +func (r *InMemoryArtifactRegistry) Add(processorName string, e ArtifactRegistryEntry) error { + if processorName == "" { + return errors.NewWithMessage(errors.InternalErrorCode, "processor name is required") + } + if e.ArtifactID == "" { + return errors.NewWithMessage(errors.InternalErrorCode, "artifact id is required") + } + + r.mu.Lock() + defer r.mu.Unlock() + + if r.EntriesMap == nil { + r.EntriesMap = map[string][]ArtifactRegistryEntry{} + } + + if _, ok := r.EntriesMap[processorName]; !ok { + r.EntriesMap[processorName] = make([]ArtifactRegistryEntry, 0) + } + + for i, entry := range r.EntriesMap[processorName] { + if entry.ArtifactID == e.ArtifactID { + r.EntriesMap[processorName] = slices.Delete(r.EntriesMap[processorName], i, i+1) + } + } + + r.EntriesMap[processorName] = append(r.EntriesMap[processorName], e) + + return nil +} + +func (r *InMemoryArtifactRegistry) Remove(processorName string, artifactID ArtifactID) error { + r.mu.Lock() + defer r.mu.Unlock() + + if processorName == "" { + return errors.NewWithMessage(errors.InternalErrorCode, "processor name is required") + } + if artifactID == "" { + return errors.NewWithMessage(errors.InternalErrorCode, "artifact id is required") + } + + if _, ok := r.EntriesMap[processorName]; !ok { + return nil + } + + var artifacts []ArtifactRegistryEntry + for _, entry := range r.EntriesMap[processorName] { + if entry.ArtifactID != artifactID { + artifacts = append(artifacts, entry) + } + } + + r.EntriesMap[processorName] = artifacts + + return nil +} + +func (r *InMemoryArtifactRegistry) FindByID(processorName string, artifactID ArtifactID) (entry ArtifactRegistryEntry, found bool, err error) { + all, _ := r.FindAll(processorName) + + for _, e := range all { + if e.ArtifactID == artifactID { + return e, true, nil + } + } + + return ArtifactRegistryEntry{}, false, nil +} + +func (r *InMemoryArtifactRegistry) FindAll(processorName string) ([]ArtifactRegistryEntry, error) { + if r.EntriesMap == nil { + return nil, nil + } + + values, ok := r.EntriesMap[processorName] + if !ok { + return nil, nil + } + + return values, nil +} + +func (r *InMemoryArtifactRegistry) Load() error { return nil } + +func (r *InMemoryArtifactRegistry) Save() error { return nil } diff --git a/pkg/specter/artifactproc_registry_json.go b/pkg/specter/artifactproc_registry_json.go new file mode 100644 index 0000000..3835fb0 --- /dev/null +++ b/pkg/specter/artifactproc_registry_json.go @@ -0,0 +1,118 @@ +// Copyright 2024 Morébec +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package specter + +import ( + "encoding/json" + "github.com/morebec/go-errors/errors" + "io/fs" + "os" + "sync" + "time" +) + +const DefaultJSONArtifactRegistryFileName = ".specter.json" + +type JSONArtifactRegistryRepresentation struct { + GeneratedAt time.Time `json:"generatedAt"` + EntriesMap map[string][]JSONArtifactRegistryEntry `json:"entries"` +} + +type JSONArtifactRegistryEntry struct { + ArtifactID string `json:"artifactId"` + Metadata map[string]any `json:"metadata"` +} + +// JSONArtifactRegistry implementation of a ArtifactRegistry that is saved as a JSON file. +type JSONArtifactRegistry struct { + *InMemoryArtifactRegistry + FileSystem FileSystem + FilePath string + TimeProvider TimeProvider + + mu sync.RWMutex // Mutex to protect concurrent access +} + +func (r *JSONArtifactRegistry) Load() error { + r.mu.Lock() + defer r.mu.Unlock() + + bytes, err := r.FileSystem.ReadFile(r.FilePath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.WrapWithMessage(err, errors.InternalErrorCode, "failed loading artifact file registry") + } + + // empty file is okay + if len(bytes) == 0 { + return nil + } + + repr := &JSONArtifactRegistryRepresentation{} + + if err := json.Unmarshal(bytes, repr); err != nil { + return errors.WrapWithMessage(err, errors.InternalErrorCode, "failed loading artifact file registry") + } + + for processorName, entries := range repr.EntriesMap { + for _, entry := range entries { + if err := r.InMemoryArtifactRegistry.Add(processorName, ArtifactRegistryEntry{ + ArtifactID: ArtifactID(entry.ArtifactID), + Metadata: entry.Metadata, + }); err != nil { + return err + } + } + } + + return nil +} + +func (r *JSONArtifactRegistry) Save() error { + r.mu.RLock() + defer r.mu.RUnlock() + + repr := JSONArtifactRegistryRepresentation{ + GeneratedAt: r.TimeProvider(), + EntriesMap: make(map[string][]JSONArtifactRegistryEntry, len(r.InMemoryArtifactRegistry.EntriesMap)), + } + + // Add entries to representation + for processorName, entries := range r.InMemoryArtifactRegistry.EntriesMap { + repr.EntriesMap[processorName] = nil + for _, entry := range entries { + repr.EntriesMap[processorName] = append(repr.EntriesMap[processorName], JSONArtifactRegistryEntry{ + ArtifactID: string(entry.ArtifactID), + Metadata: entry.Metadata, + }) + } + } + + // Set generation date + repr.GeneratedAt = r.TimeProvider() + + // Generate a JSON file containing all artifact files for clean up later on + js, err := json.MarshalIndent(repr, "", " ") + if err != nil { + return errors.Wrap(err, "failed generating artifact file registry") + } + if err := r.FileSystem.WriteFile(r.FilePath, js, fs.ModePerm); err != nil { + return errors.Wrap(err, "failed generating artifact file registry") + } + + return nil +} diff --git a/pkg/specter/artifactproc_test.go b/pkg/specter/artifactproc_test.go new file mode 100644 index 0000000..0eae069 --- /dev/null +++ b/pkg/specter/artifactproc_test.go @@ -0,0 +1,28 @@ +package specter + +import ( + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestArtifactProcessorFunc(t *testing.T) { + t.Run("Name should be set", func(t *testing.T) { + a := NewArtifactProcessorFunc("name", func(ctx ArtifactProcessingContext) error { + return nil + }) + require.Equal(t, "name", a.Name()) + }) + + t.Run("Process should be called", func(t *testing.T) { + called := false + a := NewArtifactProcessorFunc("name", func(ctx ArtifactProcessingContext) error { + called = true + return assert.AnError + }) + + err := 
a.Process(ArtifactProcessingContext{}) + require.Error(t, err) + require.True(t, called) + }) +} diff --git a/pkg/specter/assembly.go b/pkg/specter/assembly.go index 718c556..a8a9103 100644 --- a/pkg/specter/assembly.go +++ b/pkg/specter/assembly.go @@ -14,82 +14,109 @@ package specter -import ( - "os" -) +type PipelineBuilder struct { + pipeline *DefaultPipeline + + SourceLoaders []SourceLoader + UnitLoaders []UnitLoader + UnitProcessors []UnitProcessor + ArtifactProcessors []ArtifactProcessor + ArtifactRegistry ArtifactRegistry + + SourceLoadingStageHooks SourceLoadingStageHooks + UnitLoadingStageHooks UnitLoadingStageHooks + UnitProcessingStageHooks UnitProcessingStageHooks + ArtifactProcessingStageHooks ArtifactProcessingStageHooks +} // NewPipeline creates a new instance of a *Pipeline using the provided options. -func NewPipeline(opts ...PipelineOption) *Pipeline { - s := &Pipeline{ - Logger: NewDefaultLogger(DefaultLoggerConfig{DisableColors: false, Writer: os.Stdout}), - TimeProvider: CurrentTimeProvider(), - } - for _, o := range opts { - o(s) +func NewPipeline() PipelineBuilder { + return PipelineBuilder{ + pipeline: &DefaultPipeline{ + TimeProvider: CurrentTimeProvider, + SourceLoadingStage: sourceLoadingStage{}, + UnitLoadingStage: unitLoadingStage{}, + UnitProcessingStage: unitProcessingStage{}, + ArtifactProcessingStage: artifactProcessingStage{}, + }, } - return s } -// PipelineOption represents an option to configure a Pipeline instance. -type PipelineOption func(s *Pipeline) +// WithSourceLoaders configures the SourceLoader of a Pipeline instance. +func (b PipelineBuilder) WithSourceLoaders(loaders ...SourceLoader) PipelineBuilder { + b.SourceLoaders = loaders + return b +} -// WithLogger configures the Logger of a Pipeline instance. -func WithLogger(l Logger) PipelineOption { - return func(p *Pipeline) { - p.Logger = l - } +// WithUnitLoaders configures the UnitLoader of a Pipeline instance. +func (b PipelineBuilder) WithUnitLoaders(loaders ...UnitLoader) PipelineBuilder { + b.UnitLoaders = loaders + return b } -// WithSourceLoaders configures the SourceLoader of a Pipeline instance. -func WithSourceLoaders(loaders ...SourceLoader) PipelineOption { - return func(p *Pipeline) { - p.SourceLoaders = append(p.SourceLoaders, loaders...) - } +// WithUnitProcessors configures the UnitProcess of a Pipeline instance. +func (b PipelineBuilder) WithUnitProcessors(processors ...UnitProcessor) PipelineBuilder { + b.UnitProcessors = processors + return b } -// WithLoaders configures the UnitLoader of a Pipeline instance. -func WithLoaders(loaders ...UnitLoader) PipelineOption { - return func(p *Pipeline) { - p.Loaders = append(p.Loaders, loaders...) - } +// WithArtifactProcessors configures the ArtifactProcessor of a Pipeline instance. +func (b PipelineBuilder) WithArtifactProcessors(processors ...ArtifactProcessor) PipelineBuilder { + b.ArtifactProcessors = processors + return b } -// WithProcessors configures the UnitProcess of a Pipeline instance. -func WithProcessors(processors ...UnitProcessor) PipelineOption { - return func(p *Pipeline) { - p.Processors = append(p.Processors, processors...) - } +// WithArtifactRegistry configures the ArtifactRegistry of a Pipeline instance. +func (b PipelineBuilder) WithArtifactRegistry(r ArtifactRegistry) PipelineBuilder { + b.ArtifactRegistry = r + return b } -// WithArtifactProcessors configures the ArtifactProcessor of a Pipeline instance. 
-func WithArtifactProcessors(processors ...ArtifactProcessor) PipelineOption { - return func(p *Pipeline) { - p.ArtifactProcessors = append(p.ArtifactProcessors, processors...) - } +func (b PipelineBuilder) WithSourceLoadingStageHooks(h SourceLoadingStageHooks) PipelineBuilder { + b.SourceLoadingStageHooks = h + return b } -// WithTimeProvider configures the TimeProvider of a Pipeline instance. -func WithTimeProvider(tp TimeProvider) PipelineOption { - return func(p *Pipeline) { - p.TimeProvider = tp - } +func (b PipelineBuilder) WithUnitLoadingStageHooks(h UnitLoadingStageHooks) PipelineBuilder { + b.UnitLoadingStageHooks = h + return b } -// WithArtifactRegistry configures the ArtifactRegistry of a Pipeline instance. -func WithArtifactRegistry(r ArtifactRegistry) PipelineOption { - return func(p *Pipeline) { - p.ArtifactRegistry = r - } +func (b PipelineBuilder) WithUnitProcessingStageHooks(h UnitProcessingStageHooks) PipelineBuilder { + b.UnitProcessingStageHooks = h + return b } -// DEFAULTS PIPELINE OPTIONS +func (b PipelineBuilder) WithArtifactProcessingStageHooks(h ArtifactProcessingStageHooks) PipelineBuilder { + b.ArtifactProcessingStageHooks = h + return b +} -func WithDefaultLogger() PipelineOption { - return WithLogger(NewDefaultLogger(DefaultLoggerConfig{DisableColors: false, Writer: os.Stdout})) +func (b PipelineBuilder) Build() Pipeline { + return DefaultPipeline{ + TimeProvider: CurrentTimeProvider, + SourceLoadingStage: sourceLoadingStage{ + SourceLoaders: b.SourceLoaders, + Hooks: b.SourceLoadingStageHooks, + }, + UnitLoadingStage: unitLoadingStage{ + Loaders: b.UnitLoaders, + Hooks: b.UnitLoadingStageHooks, + }, + UnitProcessingStage: unitProcessingStage{ + Processors: b.UnitProcessors, + Hooks: b.UnitProcessingStageHooks, + }, + ArtifactProcessingStage: artifactProcessingStage{ + Registry: b.ArtifactRegistry, + Processors: b.ArtifactProcessors, + Hooks: b.ArtifactProcessingStageHooks, + }, + } } -func WithJSONArtifactRegistry(fileName string, fs FileSystem) PipelineOption { - return WithArtifactRegistry(NewJSONArtifactRegistry(fileName, fs)) +func (b PipelineBuilder) WithJSONArtifactRegistry(fileName string, fs FileSystem) PipelineBuilder { + return b.WithArtifactRegistry(NewJSONArtifactRegistry(fileName, fs)) } // Loaders @@ -104,14 +131,24 @@ func NewLocalFileSourceLoader() *FileSystemSourceLoader { return NewFileSystemSourceLoader(LocalFileSystem{}) } -// ARTIFACT REGISTRIES +// UNIT PROCESSING + +func NewUnitProcessorFunc(name string, processFunc func(ctx UnitProcessingContext) ([]Artifact, error)) UnitProcessor { + return &UnitProcessorFunc{name: name, processFunc: processFunc} +} + +// ARTIFACT PROCESSING + +func NewArtifactProcessorFunc(name string, processFunc func(ctx ArtifactProcessingContext) error) ArtifactProcessor { + return &ArtifactProcessorFunc{name: name, processFunc: processFunc} +} // NewJSONArtifactRegistry returns a new artifact file registry. 
func NewJSONArtifactRegistry(fileName string, fs FileSystem) *JSONArtifactRegistry { return &JSONArtifactRegistry{ InMemoryArtifactRegistry: &InMemoryArtifactRegistry{}, FilePath: fileName, - TimeProvider: CurrentTimeProvider(), + TimeProvider: CurrentTimeProvider, FileSystem: fs, } } diff --git a/pkg/specter/assembly_test.go b/pkg/specter/assembly_test.go index 1574888..b7a27be 100644 --- a/pkg/specter/assembly_test.go +++ b/pkg/specter/assembly_test.go @@ -14,66 +14,47 @@ package specter_test -import ( - "github.com/morebec/specter/pkg/specter" - "github.com/morebec/specter/pkg/specterutils" - "github.com/morebec/specter/pkg/testutils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" -) - -func TestWithDefaultLogger(t *testing.T) { - p := specter.NewPipeline(specter.WithDefaultLogger()) - assert.IsType(t, &specter.DefaultLogger{}, p.Logger) -} - -func TestWithSourceLoaders(t *testing.T) { - loader := &specter.FileSystemSourceLoader{} - p := specter.NewPipeline(specter.WithSourceLoaders(loader)) - require.Contains(t, p.SourceLoaders, loader) -} - -func TestWithLoaders(t *testing.T) { - loader := &specterutils.HCLGenericUnitLoader{} - p := specter.NewPipeline(specter.WithLoaders(loader)) - require.Contains(t, p.Loaders, loader) -} - -func TestWithProcessors(t *testing.T) { - processor := specterutils.LintingProcessor{} - p := specter.NewPipeline(specter.WithProcessors(processor)) - require.Contains(t, p.Processors, processor) -} - -func TestWithArtifactProcessors(t *testing.T) { - processor := specter.FileArtifactProcessor{} - p := specter.NewPipeline(specter.WithArtifactProcessors(processor)) - require.Contains(t, p.ArtifactProcessors, processor) -} - -func TestWithTimeProvider(t *testing.T) { - tp := specter.CurrentTimeProvider() - p := specter.NewPipeline(specter.WithTimeProvider(tp)) - require.NotNil(t, p.TimeProvider) -} - -func TestWithArtifactRegistry(t *testing.T) { - registry := &specter.InMemoryArtifactRegistry{} - p := specter.NewPipeline(specter.WithArtifactRegistry(registry)) - require.Equal(t, p.ArtifactRegistry, registry) -} - -func TestWithJSONArtifactRegistry(t *testing.T) { - fs := &testutils.MockFileSystem{} - filePath := specter.DefaultJSONArtifactRegistryFileName - - p := specter.NewPipeline(specter.WithJSONArtifactRegistry(filePath, fs)) - require.IsType(t, &specter.JSONArtifactRegistry{}, p.ArtifactRegistry) - registry := p.ArtifactRegistry.(*specter.JSONArtifactRegistry) - - assert.Equal(t, registry.FileSystem, fs) - assert.Equal(t, registry.FilePath, filePath) - - assert.NotNil(t, registry.TimeProvider()) -} +//func TestPipelineBuilder_WithSourceLoaders(t *testing.T) { +// loader := &specter.FileSystemSourceLoader{} +// b := specter.NewPipeline().WithSourceLoaders(loader) +// +// require.Contains(t, b.SourceLoadingStage.SourceLoaders, loader) +//} +// +//func TestPipelineBuilder_WithUnitLoaders(t *testing.T) { +// loader := &specterutils.HCLGenericUnitLoader{} +// p := specter.NewPipeline().WithUnitLoaders(loader) +// require.Contains(t, p.UnitLoadingStage.Loaders, loader) +//} +// +//func TestPipelineBuilder_WithProcessors(t *testing.T) { +// processor := specterutils.LintingProcessor{} +// p := specter.NewPipeline().WithUnitProcessors(processor) +// require.Contains(t, p.UnitProcessingStage.Processors, processor) +//} +// +//func TestPipelineBuilder_WithArtifactProcessors(t *testing.T) { +// processor := specter.FileArtifactProcessor{} +// p := specter.NewPipeline().WithArtifactProcessors(processor) +// 
require.Contains(t, p.ArtifactProcessingStage.ArtifactProcessors, processor) +//} +// +//func TestPipelineBuilder_WithArtifactRegistry(t *testing.T) { +// registry := &specter.InMemoryArtifactRegistry{} +// p := specter.NewPipeline().WithArtifactRegistry(registry) +// require.Equal(t, p.ArtifactProcessingStage.ArtifactRegistry, registry) +//} +// +//func TestPipelineBuilder_WithJSONArtifactRegistry(t *testing.T) { +// fs := &testutils.MockFileSystem{} +// filePath := specter.DefaultJSONArtifactRegistryFileName +// +// p := specter.NewPipeline().WithJSONArtifactRegistry(filePath, fs) +// require.IsType(t, &specter.JSONArtifactRegistry{}, p.ArtifactProcessingStage.ArtifactRegistry) +// registry := p.ArtifactProcessingStage.ArtifactRegistry.(*specter.JSONArtifactRegistry) +// +// assert.Equal(t, registry.FileSystem, fs) +// assert.Equal(t, registry.FilePath, filePath) +// +// assert.NotNil(t, registry.TimeProvider()) +//} diff --git a/pkg/specter/filesystem.go b/pkg/specter/filesystem.go index f24bb6c..e8a4158 100644 --- a/pkg/specter/filesystem.go +++ b/pkg/specter/filesystem.go @@ -68,8 +68,6 @@ type FileSystem interface { Remove(path string) error } -var _ FileSystem = LocalFileSystem{} - // LocalFileSystem is an implementation of a FileSystem that works on the local file system where this program is running. type LocalFileSystem struct{} diff --git a/pkg/specter/pipeline.go b/pkg/specter/pipeline.go index 4feb668..b359057 100644 --- a/pkg/specter/pipeline.go +++ b/pkg/specter/pipeline.go @@ -16,41 +16,39 @@ package specter import ( "context" - "fmt" - "github.com/morebec/go-errors/errors" "time" ) type RunMode string -// PreviewMode will cause a Pipeline instance to run until the processing step only, no artifact will be processed. -const PreviewMode RunMode = "preview" - -// RunThrough will cause a Pipeline instance to be run fully. -const RunThrough RunMode = "run-through" - -const defaultRunMode = PreviewMode +const ( + RunThrough RunMode = "run-through" + StopAfterSourceLoadingStage RunMode = "stop-after-source-loading-stage" + StopAfterUnitLoadingStage RunMode = "stop-after-unit-loading-stage" + StopAfterUnitProcessingStage RunMode = "stop-after-unit-processing-stage" +) const SourceLoadingFailedErrorCode = "specter.source_loading_failed" const UnitLoadingFailedErrorCode = "specter.unit_loading_failed" const UnitProcessingFailedErrorCode = "specter.unit_processing_failed" const ArtifactProcessingFailedErrorCode = "specter.artifact_processing_failed" -// Pipeline is the service responsible to run a specter pipeline. 
-type Pipeline struct { - SourceLoaders []SourceLoader - Loaders []UnitLoader - Processors []UnitProcessor - ArtifactProcessors []ArtifactProcessor - ArtifactRegistry ArtifactRegistry - Logger Logger - TimeProvider TimeProvider +type PipelineResult struct { + PipelineContextData + EndedAt time.Time } -type PipelineResult struct { - StartedAt time.Time - EndedAt time.Time +func (r PipelineResult) ExecutionTime() time.Duration { + return r.EndedAt.Sub(r.StartedAt) +} +type PipelineContext struct { + context.Context + PipelineContextData +} + +type PipelineContextData struct { + StartedAt time.Time SourceLocations []string Sources []Source Units []Unit @@ -58,238 +56,54 @@ type PipelineResult struct { RunMode RunMode } -func (r PipelineResult) ExecutionTime() time.Duration { - return r.EndedAt.Sub(r.StartedAt) +type Pipeline interface { + Run(ctx context.Context, sourceLocations []string, runMode RunMode) (result PipelineResult, err error) } -// Run the pipeline from start to finish. -func (p Pipeline) Run(ctx context.Context, sourceLocations []string, runMode RunMode) (result PipelineResult, err error) { - if ctx == nil { - ctx = context.Background() - } - - if runMode == "" { - p.Logger.Warning(fmt.Sprintf("No run mode provided, defaulting to %q", defaultRunMode)) - runMode = defaultRunMode - } - - result = PipelineResult{ - StartedAt: p.TimeProvider(), - SourceLocations: sourceLocations, - RunMode: runMode, - } - - defer func() { - result.EndedAt = p.TimeProvider() - p.logResult(result) - }() - - // Load sources - result.Sources, err = p.loadSources(ctx, sourceLocations) - if err != nil { - e := errors.WrapWithMessage(err, SourceLoadingFailedErrorCode, "failed loading sources") - p.Logger.Error(e.Error()) - return result, e - } - - // Load Units - result.Units, err = p.loadUnits(ctx, result.Sources) - if err != nil { - e := errors.WrapWithMessage(err, UnitLoadingFailedErrorCode, "failed loading units") - p.Logger.Error(e.Error()) - return result, e - } - - // Process Units - result.Artifacts, err = p.processUnits(ctx, result.Units) - if err != nil { - e := errors.WrapWithMessage(err, UnitProcessingFailedErrorCode, "failed processing units") - p.Logger.Error(e.Error()) - return result, e - } - - // stop here if preview - if result.RunMode == PreviewMode { - return result, nil - } - - // Process Artifact - if err = p.processArtifacts(ctx, result.Units, result.Artifacts); err != nil { - e := errors.WrapWithMessage(err, ArtifactProcessingFailedErrorCode, "failed processing artifacts") - p.Logger.Error(e.Error()) - return result, e - } - - p.Logger.Success("\nProcessing completed successfully.") - return result, nil +type SourceLoadingStage interface { + Run(ctx PipelineContext, sourceLocations []string) ([]Source, error) } -func (p Pipeline) logResult(run PipelineResult) { - p.Logger.Info(fmt.Sprintf("\nRun Mode: %s", run.RunMode)) - p.Logger.Info(fmt.Sprintf("Started At: %s", run.StartedAt)) - p.Logger.Info(fmt.Sprintf("Ended at: %s", run.EndedAt)) - p.Logger.Info(fmt.Sprintf("Run time: %s", run.ExecutionTime())) - p.Logger.Info(fmt.Sprintf("Number of source locations: %d", len(run.SourceLocations))) - p.Logger.Info(fmt.Sprintf("Number of sources: %d", len(run.Sources))) - p.Logger.Info(fmt.Sprintf("Number of units: %d", len(run.Units))) - p.Logger.Info(fmt.Sprintf("Number of artifacts: %d", len(run.Artifacts))) +type UnitLoadingStage interface { + Run(ctx PipelineContext, sources []Source) ([]Unit, error) } -// loadSources only performs the Load sources step. 
-func (p Pipeline) loadSources(ctx context.Context, sourceLocations []string) ([]Source, error) { - var sources []Source - errs := errors.NewGroup(errors.InternalErrorCode) - - p.Logger.Info(fmt.Sprintf("\nLoading sources from (%d) locations:", len(sourceLocations))) - for _, sl := range sourceLocations { - p.Logger.Info(fmt.Sprintf("-> %q", sl)) - } - - for _, sl := range sourceLocations { - if err := ctx.Err(); err != nil { - return nil, err - } - - loaded := false - for _, l := range p.SourceLoaders { - if l.Supports(sl) { - loadedSources, err := l.Load(sl) - if err != nil { - p.Logger.Error(err.Error()) - errs = errs.Append(err) - continue - } - sources = append(sources, loadedSources...) - loaded = true - } - } - if !loaded { - p.Logger.Warning(fmt.Sprintf("source location %q was not loaded.", sl)) - } - } - - return sources, errors.GroupOrNil(errs) +type UnitProcessingStage interface { + Run(ctx PipelineContext, units []Unit) ([]Artifact, error) } -// loadUnits performs the loading of Units. -func (p Pipeline) loadUnits(ctx context.Context, sources []Source) ([]Unit, error) { - p.Logger.Info("\nLoading units ...") - - // Load units - var units []Unit - var sourcesNotLoaded []Source - errs := errors.NewGroup(errors.InternalErrorCode) - - for _, src := range sources { - if err := ctx.Err(); err != nil { - return nil, err - } - wasLoaded := false - for _, l := range p.Loaders { - if !l.SupportsSource(src) { - continue - } - - loadedUnits, err := l.Load(src) - if err != nil { - p.Logger.Error(err.Error()) - errs = errs.Append(err) - continue - } - - units = append(units, loadedUnits...) - wasLoaded = true - } - - if !wasLoaded { - sourcesNotLoaded = append(sourcesNotLoaded, src) - } - } - - if len(sourcesNotLoaded) > 0 { - for _, src := range sourcesNotLoaded { - p.Logger.Warning(fmt.Sprintf("%q could not be loaded.", src.Location)) - } - - p.Logger.Warning("%d units were not loaded.") - } - - p.Logger.Info(fmt.Sprintf("%d units loaded.", len(units))) - - return units, errors.GroupOrNil(errs) +type ArtifactProcessingStage interface { + Run(ctx PipelineContext, artifacts []Artifact) error } -// processUnits sends the units to processors. -func (p Pipeline) processUnits(ctx context.Context, units []Unit) ([]Artifact, error) { - pctx := ProcessingContext{ - Context: ctx, - Units: units, - Artifacts: nil, - Logger: p.Logger, - } - - p.Logger.Info("\nProcessing units ...") - for _, processor := range p.Processors { - if err := ctx.Err(); err != nil { - return nil, err - } - artifacts, err := processor.Process(pctx) - if err != nil { - return nil, errors.WrapWithMessage(err, errors.InternalErrorCode, fmt.Sprintf("processor %q failed", processor.Name())) - } - pctx.Artifacts = append(pctx.Artifacts, artifacts...) - } - - p.Logger.Info(fmt.Sprintf("%d artifacts generated.", len(pctx.Artifacts))) - for _, o := range pctx.Artifacts { - p.Logger.Info(fmt.Sprintf("-> %s", o.ID())) - } - - p.Logger.Success("Units processed successfully.") - return pctx.Artifacts, nil +type SourceLoadingStageHooks interface { + Before(ctx PipelineContext) error + After(ctx PipelineContext) error + BeforeSourceLocation(ctx PipelineContext, sourceLocation string) error + AfterSourceLocation(ctx PipelineContext, sourceLocation string) error + OnError(ctx PipelineContext, err error) error } -// processArtifacts sends a list of ProcessingArtifacts to the registered ArtifactProcessors. 
-func (p Pipeline) processArtifacts(ctx context.Context, units []Unit, artifacts []Artifact) error { - if p.ArtifactRegistry == nil { - p.ArtifactRegistry = &InMemoryArtifactRegistry{} - } - - p.Logger.Info("\nProcessing artifacts ...") - if err := p.ArtifactRegistry.Load(); err != nil { - return fmt.Errorf("failed loading artifact registry: %w", err) - } - - defer func() { - if err := p.ArtifactRegistry.Save(); err != nil { - p.Logger.Error(fmt.Errorf("failed saving artifact registry: %w", err).Error()) - } - }() - - for _, processor := range p.ArtifactProcessors { - if err := ctx.Err(); err != nil { - return err - } - - processorName := processor.Name() - artifactCtx := ArtifactProcessingContext{ - Context: ctx, - Units: units, - Artifacts: artifacts, - Logger: p.Logger, - ArtifactRegistry: ProcessorArtifactRegistry{ - processorName: processorName, - registry: p.ArtifactRegistry, - }, - processorName: processorName, - } +type UnitLoadingStageHooks interface { + Before(ctx PipelineContext) error + After(ctx PipelineContext) error + BeforeSource(ctx PipelineContext, source Source) error + AfterSource(ctx PipelineContext, source Source) error + OnError(ctx PipelineContext, err error) error +} - err := processor.Process(artifactCtx) - if err != nil { - return errors.WrapWithMessage(err, errors.InternalErrorCode, fmt.Sprintf("artifact processor %q failed", processorName)) - } - } +type UnitProcessingStageHooks interface { + Before(ctx PipelineContext) error + After(ctx PipelineContext) error + BeforeProcessor(ctx PipelineContext, processorName string) error + AfterProcessor(ctx PipelineContext, processorName string) error + OnError(ctx PipelineContext, err error) error +} - p.Logger.Success("Artifacts processed successfully.") - return nil +type ArtifactProcessingStageHooks interface { + Before(ctx PipelineContext) error + After(ctx PipelineContext) error + BeforeProcessor(ctx PipelineContext, processorName string) error + AfterProcessor(ctx PipelineContext, processorName string) error + OnError(ctx PipelineContext, err error) error } diff --git a/pkg/specter/pipeline_test.go b/pkg/specter/pipeline_test.go index c4fce0e..0340668 100644 --- a/pkg/specter/pipeline_test.go +++ b/pkg/specter/pipeline_test.go @@ -15,9 +15,7 @@ package specter_test import ( - "context" "github.com/morebec/specter/pkg/specter" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" "time" @@ -30,122 +28,3 @@ func TestRunResult_ExecutionTime(t *testing.T) { require.Equal(t, r.ExecutionTime(), time.Hour*1) } - -func TestUnitter_Run(t *testing.T) { - testDay := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) - - type given struct { - pipeline func() *specter.Pipeline - } - - type when struct { - context context.Context - sourceLocations []string - executionMode specter.RunMode - } - - type then struct { - expectedRunResult specter.PipelineResult - expectedError assert.ErrorAssertionFunc - } - - tests := []struct { - name string - given given - when when - then then - }{ - { - name: "WHEN no source locations provided THEN return with no error", - given: given{ - pipeline: func() *specter.Pipeline { - return specter.NewPipeline( - specter.WithTimeProvider(staticTimeProvider(testDay)), - ) - }, - }, - when: when{ - context: context.Background(), - sourceLocations: nil, - executionMode: specter.PreviewMode, - }, - then: then{ - expectedRunResult: specter.PipelineResult{ - RunMode: specter.PreviewMode, - Sources: nil, - Units: nil, - Artifacts: nil, - StartedAt: testDay, - EndedAt: testDay, - }, - 
expectedError: assert.NoError, - }, - }, - { - name: "WHEN no execution mode provided THEN assume Preview mode", - given: given{ - pipeline: func() *specter.Pipeline { - return specter.NewPipeline( - specter.WithTimeProvider(staticTimeProvider(testDay)), - ) - }, - }, - when: when{ - context: context.Background(), - sourceLocations: nil, - executionMode: "", // No execution mode should default to preview - }, - then: then{ - expectedRunResult: specter.PipelineResult{ - RunMode: specter.PreviewMode, - Sources: nil, - Units: nil, - Artifacts: nil, - StartedAt: testDay, - EndedAt: testDay, - }, - expectedError: assert.NoError, - }, - }, - { - name: "WHEN no context is provided THEN assume a context.Background and do not fail", - given: given{ - pipeline: func() *specter.Pipeline { - return specter.NewPipeline( - specter.WithTimeProvider(staticTimeProvider(testDay)), - ) - }, - }, - when: when{ - context: nil, - sourceLocations: nil, - executionMode: "", // No execution mode should default to preview - }, - then: then{ - expectedRunResult: specter.PipelineResult{ - RunMode: specter.PreviewMode, - Sources: nil, - Units: nil, - Artifacts: nil, - StartedAt: testDay, - EndedAt: testDay, - }, - expectedError: assert.NoError, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := tt.given.pipeline() - - actualResult, err := p.Run(tt.when.context, tt.when.sourceLocations, tt.when.executionMode) - if tt.then.expectedError != nil { - tt.then.expectedError(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, tt.then.expectedRunResult, actualResult) - }) - } -} diff --git a/pkg/specter/pipelinedefault.go b/pkg/specter/pipelinedefault.go new file mode 100644 index 0000000..ac3768b --- /dev/null +++ b/pkg/specter/pipelinedefault.go @@ -0,0 +1,455 @@ +// Copyright 2024 Morébec +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specter + +import ( + "context" + "fmt" + "github.com/morebec/go-errors/errors" +) + +// DefaultPipeline is the service responsible to run a specter DefaultPipeline. +type DefaultPipeline struct { + TimeProvider TimeProvider + + SourceLoadingStage SourceLoadingStage + UnitLoadingStage UnitLoadingStage + UnitProcessingStage UnitProcessingStage + ArtifactProcessingStage ArtifactProcessingStage +} + +// Run the DefaultPipeline from start to finish. 
+func (p DefaultPipeline) Run(ctx context.Context, sourceLocations []string, runMode RunMode) (PipelineResult, error) { + if runMode == "" { + runMode = RunThrough + } + + pctx := &PipelineContext{ + Context: ctx, + PipelineContextData: PipelineContextData{ + StartedAt: p.TimeProvider(), + SourceLocations: sourceLocations, + RunMode: runMode, + }, + } + + err := p.run(pctx, sourceLocations, runMode) + + result := PipelineResult{ + PipelineContextData: pctx.PipelineContextData, + EndedAt: p.TimeProvider(), + } + + return result, err +} + +func (p DefaultPipeline) run(pctx *PipelineContext, sourceLocations []string, runMode RunMode) error { + if err := p.runSourceLoadingStage(pctx, sourceLocations); err != nil { + return err + } + if runMode == StopAfterSourceLoadingStage { + return nil + } + + if err := p.runUnitLoadingStage(pctx); err != nil { + return err + } + if runMode == StopAfterUnitLoadingStage { + return nil + } + + if err := p.runUnitProcessingStage(pctx); err != nil { + return err + } + if runMode == StopAfterUnitProcessingStage { + return nil + } + + if err := p.runArtifactProcessingStage(pctx); err != nil { + return err + } + return nil +} + +func (p DefaultPipeline) runSourceLoadingStage(pctx *PipelineContext, sourceLocations []string) error { + if err := pctx.Err(); err != nil { + return err + } + + var err error + if p.SourceLoadingStage != nil { + pctx.Sources, err = p.SourceLoadingStage.Run(*pctx, sourceLocations) + if err != nil { + return errors.WrapWithMessage(err, SourceLoadingFailedErrorCode, "failed loading sources") + } + } + + return nil +} + +func (p DefaultPipeline) runUnitLoadingStage(pctx *PipelineContext) error { + if err := pctx.Err(); err != nil { + return err + } + + var err error + if p.UnitLoadingStage != nil { + pctx.Units, err = p.UnitLoadingStage.Run(*pctx, pctx.Sources) + if err != nil { + return errors.WrapWithMessage(err, UnitLoadingFailedErrorCode, "failed loading units") + } + } + + return nil +} + +func (p DefaultPipeline) runUnitProcessingStage(pctx *PipelineContext) error { + if err := pctx.Err(); err != nil { + return err + } + + var err error + if p.UnitProcessingStage != nil { + pctx.Artifacts, err = p.UnitProcessingStage.Run(*pctx, pctx.Units) + if err != nil { + return errors.WrapWithMessage(err, UnitProcessingFailedErrorCode, "failed processing units") + } + } + return nil +} + +func (p DefaultPipeline) runArtifactProcessingStage(pctx *PipelineContext) error { + if err := pctx.Err(); err != nil { + return err + } + + if p.ArtifactProcessingStage != nil { + if err := p.ArtifactProcessingStage.Run(*pctx, pctx.Artifacts); err != nil { + return errors.WrapWithMessage(err, ArtifactProcessingFailedErrorCode, "failed processing artifacts") + } + } + return nil +} + +type SourceLoadingStageHooksAdapter struct{} + +func (_ SourceLoadingStageHooksAdapter) Before(_ PipelineContext) error { return nil } +func (_ SourceLoadingStageHooksAdapter) After(_ PipelineContext) error { return nil } +func (_ SourceLoadingStageHooksAdapter) BeforeSourceLocation(_ PipelineContext, _ string) error { + return nil +} +func (_ SourceLoadingStageHooksAdapter) AfterSourceLocation(_ PipelineContext, _ string) error { + return nil +} +func (_ SourceLoadingStageHooksAdapter) OnError(_ PipelineContext, err error) error { + return err +} + +type sourceLoadingStage struct { + SourceLoaders []SourceLoader + Hooks SourceLoadingStageHooks +} + +func (s sourceLoadingStage) Run(ctx PipelineContext, sourceLocations []string) ([]Source, error) { + if s.Hooks == nil { + s.Hooks = 
SourceLoadingStageHooksAdapter{} + } + + if err := s.Hooks.Before(ctx); err != nil { + return nil, newFailedToRunHookErr(err, "Before") + } + + sources, err := s.run(ctx, sourceLocations) + if err != nil { + err = errors.WrapWithMessage(err, SourceLoadingFailedErrorCode, "failed to load sources") + return nil, s.Hooks.OnError(ctx, err) + } + + if err := s.Hooks.After(ctx); err != nil { + return nil, newFailedToRunHookErr(err, "After") + } + + return sources, nil +} + +func (s sourceLoadingStage) run(ctx PipelineContext, sourceLocations []string) ([]Source, error) { + ctx.SourceLocations = sourceLocations + + errs := errors.NewGroup(SourceLoadingFailedErrorCode) + + for _, sl := range sourceLocations { + if err := ctx.Err(); err != nil { + return nil, err + } + + if err := s.Hooks.BeforeSourceLocation(ctx, sl); err != nil { + return nil, newFailedToRunHookErr(err, "BeforeSourceLocation") + } + + sources, err := s.processSourceLocation(ctx, sl) + if err != nil { + errs = errs.Append(err) + continue + } + ctx.Sources = append(ctx.Sources, sources...) + + if err := s.Hooks.AfterSourceLocation(ctx, sl); err != nil { + return nil, newFailedToRunHookErr(err, "AfterSourceLocation") + } + } + return ctx.Sources, errors.GroupOrNil(errs) +} + +// processSourceLocation loads a single source location using all loaders that support it. +func (s sourceLoadingStage) processSourceLocation(ctx PipelineContext, sl string) ([]Source, error) { + var sources []Source + for _, l := range s.SourceLoaders { + if !l.Supports(sl) { + continue + } + loadedSources, err := l.Load(sl) + if err != nil { + return nil, err + } + sources = append(sources, loadedSources...) + } + return sources, nil +} + +type UnitLoadingStageHooksAdapter struct{} + +func (_ UnitLoadingStageHooksAdapter) Before(_ PipelineContext) error { return nil } +func (_ UnitLoadingStageHooksAdapter) After(_ PipelineContext) error { return nil } +func (_ UnitLoadingStageHooksAdapter) BeforeSource(_ PipelineContext, _ Source) error { return nil } +func (_ UnitLoadingStageHooksAdapter) AfterSource(_ PipelineContext, _ Source) error { return nil } +func (_ UnitLoadingStageHooksAdapter) OnError(_ PipelineContext, err error) error { return err } + +type UnitProcessingStageHooksAdapter struct { +} + +func (_ UnitProcessingStageHooksAdapter) Before(_ PipelineContext) error { return nil } +func (_ UnitProcessingStageHooksAdapter) After(_ PipelineContext) error { return nil } +func (_ UnitProcessingStageHooksAdapter) BeforeProcessor(_ PipelineContext, _ string) error { + return nil +} +func (_ UnitProcessingStageHooksAdapter) AfterProcessor(_ PipelineContext, _ string) error { + return nil +} +func (_ UnitProcessingStageHooksAdapter) OnError(_ PipelineContext, err error) error { return err } + +type unitLoadingStage struct { + Loaders []UnitLoader + Hooks UnitLoadingStageHooks +} + +func (s unitLoadingStage) Run(ctx PipelineContext, sources []Source) ([]Unit, error) { + if s.Hooks == nil { + s.Hooks = UnitLoadingStageHooksAdapter{} + } + + if err := s.Hooks.Before(ctx); err != nil { + return nil, err + } + + errs := errors.NewGroup(errors.InternalErrorCode) + for _, src := range sources { + if err := ctx.Err(); err != nil { + return nil, s.handleError(ctx, err) + } + + if err := s.Hooks.BeforeSource(ctx, src); err != nil { + return nil, err + } + + for _, l := range s.Loaders { + if !l.SupportsSource(src) { + continue + } + + loadedUnits, err := l.Load(src) + if err != nil { + errs = errs.Append(err) + continue + } + + ctx.Units = append(ctx.Units, loadedUnits...)
+ } + + if err := s.Hooks.AfterSource(ctx, src); err != nil { + return nil, err + } + } + + if err := s.Hooks.After(ctx); err != nil { + return nil, err + } + + return ctx.Units, s.handleError(ctx, errors.GroupOrNil(errs)) +} + +func (s unitLoadingStage) handleError(ctx PipelineContext, err error) error { + return s.Hooks.OnError(ctx, err) +} + +type unitProcessingStage struct { + Processors []UnitProcessor + Hooks UnitProcessingStageHooks +} + +func (s unitProcessingStage) Run(ctx PipelineContext, units []Unit) ([]Artifact, error) { + if s.Hooks == nil { + s.Hooks = UnitProcessingStageHooksAdapter{} + } + + if err := s.Hooks.Before(ctx); err != nil { + return nil, newFailedToRunHookErr(err, "Before") + } + + artifacts, err := s.run(ctx, units) + if err != nil { + return nil, s.Hooks.OnError(ctx, errors.WrapWithMessage(err, UnitProcessingFailedErrorCode, "failed processing units")) + } + + if err := s.Hooks.After(ctx); err != nil { + return nil, newFailedToRunHookErr(err, "After") + } + + return artifacts, nil +} + +func (s unitProcessingStage) run(ctx PipelineContext, units []Unit) ([]Artifact, error) { + for _, processor := range s.Processors { + if err := ctx.Err(); err != nil { + return nil, err + } + + if err := s.Hooks.BeforeProcessor(ctx, processor.Name()); err != nil { + return nil, newFailedToRunHookErr(err, "AfterProcessor") + } + + artifacts, err := processor.Process(UnitProcessingContext{ + Context: ctx, + Units: units, + Artifacts: ctx.Artifacts, + }) + if err != nil { + return nil, fmt.Errorf("processor %q returned an error :%w", processor.Name(), err) + } + + ctx.Artifacts = append(ctx.Artifacts, artifacts...) + + if err := s.Hooks.AfterProcessor(ctx, processor.Name()); err != nil { + return nil, newFailedToRunHookErr(err, "AfterProcessor") + } + } + + return ctx.Artifacts, nil +} + +type ArtifactProcessingStageHooksAdapter struct { +} + +func (_ ArtifactProcessingStageHooksAdapter) Before(PipelineContext) error { return nil } +func (_ ArtifactProcessingStageHooksAdapter) After(PipelineContext) error { return nil } +func (_ ArtifactProcessingStageHooksAdapter) BeforeProcessor(PipelineContext, string) error { + return nil +} +func (_ ArtifactProcessingStageHooksAdapter) AfterProcessor(PipelineContext, string) error { + return nil +} +func (_ ArtifactProcessingStageHooksAdapter) OnError(_ PipelineContext, err error) error { return err } + +type artifactProcessingStage struct { + Registry ArtifactRegistry + Processors []ArtifactProcessor + Hooks ArtifactProcessingStageHooks +} + +func (s artifactProcessingStage) Run(ctx PipelineContext, artifacts []Artifact) error { + if s.Hooks == nil { + s.Hooks = ArtifactProcessingStageHooksAdapter{} + } + + if err := s.Hooks.Before(ctx); err != nil { + return newFailedToRunHookErr(err, "Before") + } + + if err := s.run(ctx, artifacts); err != nil { + err = errors.WrapWithMessage(err, ArtifactProcessingFailedErrorCode, "failed processing artifacts") + return s.Hooks.OnError(ctx, err) + } + + if err := s.Hooks.After(ctx); err != nil { + return newFailedToRunHookErr(err, "After") + } + + return nil +} + +func (s artifactProcessingStage) run(ctx PipelineContext, artifacts []Artifact) (err error) { + if s.Registry == nil { + s.Registry = &InMemoryArtifactRegistry{} + } + + if err := s.Registry.Load(); err != nil { + return fmt.Errorf("failed loading artifact registry: %w", err) + } + + defer func() { + if saveErr := s.Registry.Save(); saveErr != nil { + saveErr = fmt.Errorf("failed saving artifact registry: %w", err) + if err != nil { + err = 
errors.NewGroup(ArtifactProcessingFailedErrorCode, err, saveErr) + } else { + err = saveErr + } + } + }() + + for _, processor := range s.Processors { + if err := ctx.Err(); err != nil { + return err + } + if err := s.Hooks.BeforeProcessor(ctx, processor.Name()); err != nil { + return newFailedToRunHookErr(err, "BeforeProcessor") + } + if err := s.runProcessor(ctx, processor, artifacts); err != nil { + return fmt.Errorf("artifact processor %q returned an error: %w", processor.Name(), err) + } + if err := s.Hooks.AfterProcessor(ctx, processor.Name()); err != nil { + return newFailedToRunHookErr(err, "AfterProcessor") + } + } + + return nil +} + +func (s artifactProcessingStage) runProcessor(ctx PipelineContext, processor ArtifactProcessor, artifacts []Artifact) error { + processorName := processor.Name() + apCtx := ArtifactProcessingContext{ + Context: ctx, + Units: ctx.Units, + Artifacts: artifacts, + ArtifactRegistry: ProcessorArtifactRegistry{ + processorName: processorName, + registry: s.Registry, + }, + processorName: processorName, + } + + return processor.Process(apCtx) +} + +func newFailedToRunHookErr(err error, hookName string) error { + return fmt.Errorf("hook %q returned an error: %w", hookName, err) +} diff --git a/pkg/specter/pipelinedefault_test.go b/pkg/specter/pipelinedefault_test.go new file mode 100644 index 0000000..e7fc292 --- /dev/null +++ b/pkg/specter/pipelinedefault_test.go @@ -0,0 +1,661 @@ +// Copyright 2024 Morébec +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package specter_test + +import ( + "context" + "github.com/morebec/specter/pkg/specter" + "github.com/morebec/specter/pkg/testutils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +func TestSourceLoadingStageHooksAdapter(t *testing.T) { + t.Run("Before should not return error", func(t *testing.T) { + a := specter.SourceLoadingStageHooksAdapter{} + err := a.Before(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("After should not return error", func(t *testing.T) { + a := specter.SourceLoadingStageHooksAdapter{} + err := a.After(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("BeforeSourceLocation should not return error", func(t *testing.T) { + a := specter.SourceLoadingStageHooksAdapter{} + err := a.BeforeSourceLocation(specter.PipelineContext{}, "") + require.NoError(t, err) + }) + + t.Run("AfterSourceLocation should not return error", func(t *testing.T) { + a := specter.SourceLoadingStageHooksAdapter{} + err := a.AfterSourceLocation(specter.PipelineContext{}, "") + require.NoError(t, err) + }) + + t.Run("OnError should return error", func(t *testing.T) { + a := specter.SourceLoadingStageHooksAdapter{} + err := a.OnError(specter.PipelineContext{}, assert.AnError) + require.Equal(t, assert.AnError, err) + }) +} + +func TestUnitLoadingStageHooksAdapter(t *testing.T) { + t.Run("Before should not return error", func(t *testing.T) { + + a := specter.UnitLoadingStageHooksAdapter{} + err := a.Before(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("After should not return error", func(t *testing.T) { + + a := specter.UnitLoadingStageHooksAdapter{} + err := a.After(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("BeforeSource should not return error", func(t *testing.T) { + + a := specter.UnitLoadingStageHooksAdapter{} + err := a.BeforeSource(specter.PipelineContext{}, specter.Source{}) + require.NoError(t, err) + }) + + t.Run("AfterSource should not return error", func(t *testing.T) { + + a := specter.UnitLoadingStageHooksAdapter{} + err := a.AfterSource(specter.PipelineContext{}, specter.Source{}) + require.NoError(t, err) + }) + + t.Run("OnError should return error", func(t *testing.T) { + + a := specter.UnitLoadingStageHooksAdapter{} + err := a.OnError(specter.PipelineContext{}, assert.AnError) + require.Equal(t, assert.AnError, err) + }) +} + +func TestUnitProcessingStageHooksAdapter(t *testing.T) { + t.Run("Before should not return error", func(t *testing.T) { + a := specter.UnitProcessingStageHooksAdapter{} + err := a.Before(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("After should not return error", func(t *testing.T) { + a := specter.UnitProcessingStageHooksAdapter{} + err := a.After(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("BeforeProcessor should not return error", func(t *testing.T) { + a := specter.UnitProcessingStageHooksAdapter{} + err := a.BeforeProcessor(specter.PipelineContext{}, "processor") + require.NoError(t, err) + }) + + t.Run("AfterProcessor should not return error", func(t *testing.T) { + a := specter.UnitProcessingStageHooksAdapter{} + err := a.AfterProcessor(specter.PipelineContext{}, "processor") + require.NoError(t, err) + }) + + t.Run("OnError should return error", func(t *testing.T) { + a := specter.UnitProcessingStageHooksAdapter{} + err := a.OnError(specter.PipelineContext{}, assert.AnError) + require.Equal(t, assert.AnError, err) + }) +} + +func 
TestArtifactProcessingStageHooksAdapter(t *testing.T) { + t.Run("Before should not return error", func(t *testing.T) { + a := specter.ArtifactProcessingStageHooksAdapter{} + err := a.Before(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("After should not return error", func(t *testing.T) { + a := specter.ArtifactProcessingStageHooksAdapter{} + err := a.After(specter.PipelineContext{}) + require.NoError(t, err) + }) + + t.Run("BeforeProcessor should not return error", func(t *testing.T) { + a := specter.ArtifactProcessingStageHooksAdapter{} + err := a.BeforeProcessor(specter.PipelineContext{}, "processor") + require.NoError(t, err) + }) + + t.Run("AfterProcessor should not return error", func(t *testing.T) { + a := specter.ArtifactProcessingStageHooksAdapter{} + err := a.AfterProcessor(specter.PipelineContext{}, "processor") + require.NoError(t, err) + }) + + t.Run("OnError should return error", func(t *testing.T) { + a := specter.ArtifactProcessingStageHooksAdapter{} + err := a.OnError(specter.PipelineContext{}, assert.AnError) + require.Equal(t, assert.AnError, err) + }) +} + +func TestDefaultPipeline_Run(t *testing.T) { + currentTime := time.Date(2024, 01, 01, 0, 0, 0, 0, time.UTC) + + type given struct { + SourceLoadingStage specter.SourceLoadingStage + UnitLoadingStage specter.UnitLoadingStage + UnitProcessingStage specter.UnitProcessingStage + ArtifactProcessingStage specter.ArtifactProcessingStage + } + type args struct { + ctx context.Context + sourceLocations []string + runMode specter.RunMode + } + type then struct { + expectedResult specter.PipelineResult + expectedError require.ErrorAssertionFunc + } + tests := []struct { + name string + given given + when args + then then + }{ + { + name: "WHEN an empty RunMode is provided THEN should default to RunThrough", + when: args{ + ctx: context.Background(), + sourceLocations: nil, + runMode: "", + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.RunThrough, + }, + EndedAt: currentTime, + }, + expectedError: require.NoError, + }, + }, + { + // Empty pipeline is ok. 
+ name: "GIVEN nil stages provided THEN no error should be returned", + given: given{ + SourceLoadingStage: nil, + UnitLoadingStage: nil, + UnitProcessingStage: nil, + ArtifactProcessingStage: nil, + }, + when: args{ + ctx: context.Background(), + sourceLocations: nil, + runMode: "some-mode", + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: "some-mode", + }, + EndedAt: currentTime, + }, + expectedError: require.NoError, + }, + }, + // Stage Errors + { + name: "GIVEN source loading stage fails THEN an error should be returned", + given: given{ + SourceLoadingStage: FailingSourceLoadingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.RunThrough, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.RunThrough, + }, + EndedAt: currentTime, + }, + expectedError: testutils.RequireErrorWithCode(specter.SourceLoadingFailedErrorCode), + }, + }, + { + name: "GIVEN unit loading stage fails THEN an error should be returned", + given: given{ + UnitLoadingStage: FailingUnitLoadingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.RunThrough, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.RunThrough, + }, + EndedAt: currentTime, + }, + expectedError: testutils.RequireErrorWithCode(specter.UnitLoadingFailedErrorCode), + }, + }, + { + name: "GIVEN unit processing stage fails THEN an error should be returned", + given: given{ + UnitProcessingStage: FailingUnitProcessingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.RunThrough, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.RunThrough, + }, + EndedAt: currentTime, + }, + expectedError: testutils.RequireErrorWithCode(specter.UnitProcessingFailedErrorCode), + }, + }, + { + name: "GIVEN artifact processing stage fails THEN an error should be returned", + given: given{ + ArtifactProcessingStage: FailingArtifactProcessingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.RunThrough, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.RunThrough, + }, + EndedAt: currentTime, + }, + expectedError: testutils.RequireErrorWithCode(specter.ArtifactProcessingFailedErrorCode), + }, + }, + + // Run Modes + { + name: "WHEN stop after source loading THEN it should stop and no error should be returned", + given: given{ + UnitLoadingStage: FailingUnitLoadingStage{}, // Should not fail + }, + when: args{ + ctx: context.Background(), + runMode: specter.StopAfterSourceLoadingStage, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.StopAfterSourceLoadingStage, + }, + EndedAt: currentTime, + }, + expectedError: require.NoError, + }, + }, + { + name: "WHEN stop after unit loading THEN it should stop and no error should be returned", + given: given{ + UnitProcessingStage: FailingUnitProcessingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.StopAfterUnitLoadingStage, + }, + then: then{ + expectedResult: 
specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.StopAfterUnitLoadingStage, + }, + EndedAt: currentTime, + }, + expectedError: require.NoError, + }, + }, + { + name: "WHEN stop after unit processing THEN it should stop and no error should be returned", + given: given{ + ArtifactProcessingStage: FailingArtifactProcessingStage{}, + }, + when: args{ + ctx: context.Background(), + runMode: specter.StopAfterUnitProcessingStage, + }, + then: then{ + expectedResult: specter.PipelineResult{ + PipelineContextData: specter.PipelineContextData{ + StartedAt: currentTime, + RunMode: specter.StopAfterUnitProcessingStage, + }, + EndedAt: currentTime, + }, + expectedError: require.NoError, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := specter.DefaultPipeline{ + TimeProvider: staticTimeProvider(currentTime), + SourceLoadingStage: tt.given.SourceLoadingStage, + UnitLoadingStage: tt.given.UnitLoadingStage, + UnitProcessingStage: tt.given.UnitProcessingStage, + ArtifactProcessingStage: tt.given.ArtifactProcessingStage, + } + got, err := p.Run(tt.when.ctx, tt.when.sourceLocations, tt.when.runMode) + tt.then.expectedError(t, err) + assert.Equal(t, tt.then.expectedResult, got) + }) + } +} + +func Test_sourceLoadingStage_Run(t *testing.T) { + t.Run("should call all hooks under normal processing", func(t *testing.T) { + recorder := sourceLoadingStageHooksCallRecorder{} + + stage := specter.DefaultSourceLoadingStage{ + SourceLoaders: []specter.SourceLoader{ + specter.FunctionalSourceLoader{ + SupportsFunc: func(string) bool { + return true + }, + LoadFunc: func(location string) ([]specter.Source, error) { + return nil, nil + }, + }, + }, + Hooks: &recorder, + } + + units, err := stage.Run(specter.PipelineContext{Context: context.Background()}, []string{ + "/path/to/file", + }) + require.NoError(t, err) + require.Nil(t, units) + + assert.True(t, recorder.beforeCalled) + assert.True(t, recorder.beforeSourceLocationCalled) + assert.True(t, recorder.afterSourceLocationCalled) + assert.True(t, recorder.afterCalled) + }) + + t.Run("should call hooks until error", func(t *testing.T) { + recorder := sourceLoadingStageHooksCallRecorder{} + + stage := specter.DefaultSourceLoadingStage{ + SourceLoaders: []specter.SourceLoader{ + specter.FunctionalSourceLoader{ + SupportsFunc: func(string) bool { + return true + }, + LoadFunc: func(location string) ([]specter.Source, error) { + return nil, assert.AnError + }, + }, + }, + Hooks: &recorder, + } + + units, err := stage.Run(specter.PipelineContext{Context: context.Background()}, []string{ + "/path/to/file", + }) + require.Error(t, err) + require.Nil(t, units) + + assert.True(t, recorder.beforeCalled) + assert.True(t, recorder.beforeSourceLocationCalled) + assert.True(t, recorder.onErrorCalled) + assert.False(t, recorder.afterSourceLocationCalled) + assert.False(t, recorder.afterCalled) + }) +} + +func Test_unitProcessingStage_Run(t *testing.T) { + t.Run("should call all hooks under normal processing", func(t *testing.T) { + recorder := unitProcessingStageHooksCallRecorder{} + + stage := specter.DefaultUnitProcessingStage{ + Processors: []specter.UnitProcessor{ + specter.NewUnitProcessorFunc("", func(specter.UnitProcessingContext) ([]specter.Artifact, error) { + return nil, nil + }), + }, + Hooks: &recorder, + } + + units, err := stage.Run(specter.PipelineContext{Context: context.Background()}, nil) + require.NoError(t, err) + require.Nil(t, units) + + assert.True(t, 
recorder.beforeCalled) + assert.True(t, recorder.beforeProcessorCalled) + assert.True(t, recorder.afterProcessorCalled) + assert.True(t, recorder.afterCalled) + }) + + t.Run("should call hooks until error", func(t *testing.T) { + recorder := unitProcessingStageHooksCallRecorder{} + + stage := specter.DefaultUnitProcessingStage{ + Processors: []specter.UnitProcessor{ + specter.NewUnitProcessorFunc("", func(specter.UnitProcessingContext) ([]specter.Artifact, error) { + return nil, assert.AnError + }), + }, + Hooks: &recorder, + } + + units, err := stage.Run(specter.PipelineContext{Context: context.Background()}, nil) + require.Error(t, err) + require.Nil(t, units) + + assert.True(t, recorder.beforeCalled) + assert.True(t, recorder.beforeProcessorCalled) + assert.True(t, recorder.onErrorCalled) + assert.False(t, recorder.afterProcessorCalled) + assert.False(t, recorder.afterCalled) + }) +} + +func Test_artifactProcessingStage_Run(t *testing.T) { + t.Run("should call all hooks under normal processing", func(t *testing.T) { + recorder := artifactProcessingStageHooksCallRecorder{} + + stage := specter.DefaultArtifactProcessingStage{ + Processors: []specter.ArtifactProcessor{ + specter.NewArtifactProcessorFunc("", func(ctx specter.ArtifactProcessingContext) error { return nil }), + }, + Hooks: &recorder, + } + + err := stage.Run(specter.PipelineContext{Context: context.Background()}, []specter.Artifact{ + &specter.FileArtifact{}, + }) + require.NoError(t, err) + + assert.True(t, recorder.beforeCalled) + assert.True(t, recorder.beforeProcessorCalled) + assert.True(t, recorder.afterProcessorCalled) + assert.True(t, recorder.afterCalled) + }) + + t.Run("should call hooks until error", func(t *testing.T) { + recorder := artifactProcessingStageHooksCallRecorder{} + + stage := specter.DefaultArtifactProcessingStage{ + Processors: []specter.ArtifactProcessor{ + specter.NewArtifactProcessorFunc("", func(ctx specter.ArtifactProcessingContext) error { return assert.AnError }), + }, + Hooks: &recorder, + } + + err := stage.Run(specter.PipelineContext{Context: context.Background()}, []specter.Artifact{ + &specter.FileArtifact{}, + }) + require.Error(t, err) + + assert.True(t, recorder.beforeCalled) + assert.True(t, recorder.beforeProcessorCalled) + assert.True(t, recorder.onErrorCalled) + assert.False(t, recorder.afterProcessorCalled) + assert.False(t, recorder.afterCalled) + }) +} + +type FailingSourceLoadingStage struct{} + +func (f FailingSourceLoadingStage) Run(specter.PipelineContext, []string) ([]specter.Source, error) { + return nil, assert.AnError +} + +type FailingUnitLoadingStage struct{} + +func (f FailingUnitLoadingStage) Run(specter.PipelineContext, []specter.Source) ([]specter.Unit, error) { + return nil, assert.AnError +} + +type FailingUnitProcessingStage struct{} + +func (f FailingUnitProcessingStage) Run(specter.PipelineContext, []specter.Unit) ([]specter.Artifact, error) { + return nil, assert.AnError +} + +type FailingArtifactProcessingStage struct{} + +func (f FailingArtifactProcessingStage) Run(specter.PipelineContext, []specter.Artifact) error { + return assert.AnError +} + +type sourceLoadingStageHooksCallRecorder struct { + beforeCalled bool + afterCalled bool + beforeSourceLocationCalled bool + afterSourceLocationCalled bool + onErrorCalled bool +} + +func (s *sourceLoadingStageHooksCallRecorder) Before(specter.PipelineContext) error { + s.beforeCalled = true + return nil +} + +func (s *sourceLoadingStageHooksCallRecorder) After(specter.PipelineContext) error { + s.afterCalled = 
true + return nil +} + +func (s *sourceLoadingStageHooksCallRecorder) BeforeSourceLocation(specter.PipelineContext, string) error { + s.beforeSourceLocationCalled = true + return nil +} + +func (s *sourceLoadingStageHooksCallRecorder) AfterSourceLocation(specter.PipelineContext, string) error { + s.afterSourceLocationCalled = true + return nil +} + +func (s *sourceLoadingStageHooksCallRecorder) OnError(_ specter.PipelineContext, err error) error { + s.onErrorCalled = true + return err +} + +type unitProcessingStageHooksCallRecorder struct { + beforeCalled bool + afterCalled bool + beforeProcessorCalled bool + afterProcessorCalled bool + onErrorCalled bool +} + +func (a *unitProcessingStageHooksCallRecorder) Before(specter.PipelineContext) error { + a.beforeCalled = true + return nil +} + +func (a *unitProcessingStageHooksCallRecorder) After(specter.PipelineContext) error { + a.afterCalled = true + return nil +} + +func (a *unitProcessingStageHooksCallRecorder) BeforeProcessor(specter.PipelineContext, string) error { + a.beforeProcessorCalled = true + return nil +} + +func (a *unitProcessingStageHooksCallRecorder) AfterProcessor(specter.PipelineContext, string) error { + a.afterProcessorCalled = true + return nil +} + +func (a *unitProcessingStageHooksCallRecorder) OnError(_ specter.PipelineContext, err error) error { + a.onErrorCalled = true + return err +} + +type artifactProcessingStageHooksCallRecorder struct { + beforeCalled bool + afterCalled bool + beforeProcessorCalled bool + afterProcessorCalled bool + onErrorCalled bool +} + +func (a *artifactProcessingStageHooksCallRecorder) Before(specter.PipelineContext) error { + a.beforeCalled = true + return nil +} + +func (a *artifactProcessingStageHooksCallRecorder) After(specter.PipelineContext) error { + a.afterCalled = true + return nil +} + +func (a *artifactProcessingStageHooksCallRecorder) BeforeProcessor(specter.PipelineContext, string) error { + a.beforeProcessorCalled = true + return nil +} + +func (a *artifactProcessingStageHooksCallRecorder) AfterProcessor(specter.PipelineContext, string) error { + a.afterProcessorCalled = true + return nil +} + +func (a *artifactProcessingStageHooksCallRecorder) OnError(_ specter.PipelineContext, err error) error { + a.onErrorCalled = true + return err +} diff --git a/pkg/specter/pipelinedefaultexport_test.go b/pkg/specter/pipelinedefaultexport_test.go new file mode 100644 index 0000000..215ce99 --- /dev/null +++ b/pkg/specter/pipelinedefaultexport_test.go @@ -0,0 +1,6 @@ +package specter + +type DefaultSourceLoadingStage = sourceLoadingStage +type DefaultUnitLoadingStage = unitLoadingStage +type DefaultUnitProcessingStage = unitProcessingStage +type DefaultArtifactProcessingStage = artifactProcessingStage diff --git a/pkg/specter/srcloading.go b/pkg/specter/srcloading.go index c0e9105..941b205 100644 --- a/pkg/specter/srcloading.go +++ b/pkg/specter/srcloading.go @@ -51,8 +51,6 @@ type SourceLoader interface { Load(location string) ([]Source, error) } -var _ SourceLoader = FileSystemSourceLoader{} - // FileSystemSourceLoader is an implementation of a SourceLoader that loads files from a FileSystem. 
type FileSystemSourceLoader struct {
 	fs FileSystem
@@ -153,3 +151,16 @@ func (l FileSystemSourceLoader) loadFile(filePath string) ([]Source, error) {
 		},
 	}, nil
 }
+
+type FunctionalSourceLoader struct {
+	SupportsFunc func(location string) bool
+	LoadFunc     func(location string) ([]Source, error)
+}
+
+func (f FunctionalSourceLoader) Supports(location string) bool {
+	return f.SupportsFunc(location)
+}
+
+func (f FunctionalSourceLoader) Load(location string) ([]Source, error) {
+	return f.LoadFunc(location)
+}
diff --git a/pkg/specter/srcloading_test.go b/pkg/specter/srcloading_test.go
index 925edbd..d0248ef 100644
--- a/pkg/specter/srcloading_test.go
+++ b/pkg/specter/srcloading_test.go
@@ -183,3 +183,33 @@ func TestLocalFileSourceLoader_Load(t *testing.T) {
 		})
 	}
 }
+
+func TestFunctionalSourceLoader_Supports(t *testing.T) {
+	t.Run("Supports is called", func(t *testing.T) {
+		called := false
+		l := specter.FunctionalSourceLoader{
+			SupportsFunc: func(location string) bool {
+				called = true
+				return true
+			},
+		}
+		got := l.Supports("")
+		require.True(t, called)
+		require.True(t, got)
+	})
+
+	t.Run("Load is called", func(t *testing.T) {
+		called := false
+		l := specter.FunctionalSourceLoader{
+			LoadFunc: func(location string) ([]specter.Source, error) {
+				called = true
+				return nil, nil
+			},
+		}
+		load, err := l.Load("")
+
+		require.True(t, called)
+		require.NoError(t, err)
+		require.Nil(t, load)
+	})
+}
diff --git a/pkg/specter/time.go b/pkg/specter/time.go
index 1daca07..afb9768 100644
--- a/pkg/specter/time.go
+++ b/pkg/specter/time.go
@@ -18,8 +18,6 @@ import "time"
 
 type TimeProvider func() time.Time
 
-func CurrentTimeProvider() TimeProvider {
-	return func() time.Time {
-		return time.Now()
-	}
+func CurrentTimeProvider() time.Time {
+	return time.Now()
 }
diff --git a/pkg/specter/unitloading.go b/pkg/specter/unitloading.go
index 84f18d8..24bbc68 100644
--- a/pkg/specter/unitloading.go
+++ b/pkg/specter/unitloading.go
@@ -17,39 +17,79 @@ package specter
 // UnsupportedSourceErrorCode ErrorSeverity code returned by a UnitLoader when a given loader does not support a certain source.
 const UnsupportedSourceErrorCode = "specter.spec_loading.unsupported_source"
 
-// UnitLoader is a service responsible for loading Units from Sources.
-type UnitLoader interface {
-	// Load loads a slice of Unit from a Source, or returns an error if it encountered a failure.
-	Load(s Source) ([]Unit, error)
+type UnitKind string
 
-	// SupportsSource indicates if this loader supports a certain source or not.
-	SupportsSource(s Source) bool
-}
-
-type UnitType string
-
-type UnitName string
+type UnitID string
 
 // Unit is a general purpose data structure to represent a unit as loaded from a file regardless of the loader
 // used.
 // It is the responsibility of the application using specter to convert a unit to an appropriate data structure representing the intent of a
 // given Unit.
 type Unit interface {
-	// Name returns the unique Name of this unit.
-	Name() UnitName
+	// ID returns the unique ID of this unit.
+	ID() UnitID
 
-	// Type returns the type of this unit.
-	Type() UnitType
-
-	// Description of this unit.
-	Description() string
+	// Kind returns the kind of this unit.
+	Kind() UnitKind
 
 	// Source returns the source of this unit.
 	Source() Source
+}
+
+// WrappingUnit is a generic implementation of a Unit that wraps an underlying value.
+// This allows users to pass any value, even those that do not implement the Unit interface,
+// through the Specter pipeline. The wrapped value can be later unwrapped and used as needed.
+//
+// WrappingUnit provides a flexible solution for users who want to avoid directly
+// implementing the Unit interface in their own types or who are dealing with
+// primitive types or external structs.
+//
+// T represents the type of the value being wrapped.
+//
+// Example usage:
+//
+//	wrapped := specter.UnitOf(myValue, "unit-id", "unit-kind", source)
+//	unwrapped := wrapped.Unwrap()
+type WrappingUnit[T any] struct {
+	id      UnitID
+	kind    UnitKind
+	source  Source
+	wrapped T
+}
+
+func UnitOf[T any](v T, id UnitID, kind UnitKind, source Source) *WrappingUnit[T] {
+	return &WrappingUnit[T]{
+		id:      id,
+		kind:    kind,
+		source:  source,
+		wrapped: v,
+	}
+}
+
+func (w *WrappingUnit[T]) ID() UnitID {
+	return w.id
+}
 
-	// SetSource sets the source of the unit.
-	// This method should only be used by loaders.
-	SetSource(s Source)
+func (w *WrappingUnit[T]) Kind() UnitKind {
+	return w.kind
+}
+
+func (w *WrappingUnit[T]) Source() Source {
+	return w.source
+}
+
+// Unwrap returns the wrapped value.
+func (w *WrappingUnit[T]) Unwrap() T {
+	return w.wrapped
+}
+
+// UnitLoader is a service responsible for loading Units from Sources.
+type UnitLoader interface {
+	// Load loads a slice of Unit from a Source, or returns an error if it encountered a failure.
+	Load(s Source) ([]Unit, error)
+
+	// SupportsSource indicates if this loader supports a certain source or not.
+	SupportsSource(s Source) bool
 }
 
 // UnitGroup Represents a list of Unit.
@@ -63,25 +103,33 @@ func NewUnitGroup(u ...Unit) UnitGroup {
 // Merge Allows merging a group with another one.
 func (g UnitGroup) Merge(group UnitGroup) UnitGroup {
 	merged := g
-	typeNameIndex := map[UnitName]any{}
+	idIndex := map[UnitID]any{}
 	for _, u := range g {
-		typeNameIndex[u.Name()] = nil
+		idIndex[u.ID()] = nil
 	}
 	for _, u := range group {
-		if _, found := typeNameIndex[u.Name()]; found {
+		if _, found := idIndex[u.ID()]; found {
 			continue
 		}
-		typeNameIndex[u.Name()] = nil
+		idIndex[u.ID()] = nil
 		merged = append(merged, u)
 	}
 	return merged
 }
 
-// Select allows filtering the group for certain units.
-func (g UnitGroup) Select(p func(u Unit) bool) UnitGroup {
+type UnitMatcher func(u Unit) bool
+
+func UnitWithKindMatcher(kind UnitKind) UnitMatcher {
+	return func(u Unit) bool {
+		return u.Kind() == kind
+	}
+}
+
+// Select allows filtering the group for certain units that match a certain UnitMatcher.
+func (g UnitGroup) Select(m UnitMatcher) UnitGroup {
 	r := UnitGroup{}
 	for _, u := range g {
-		if p(u) {
+		if m(u) {
 			r = append(r, u)
 		}
 	}
@@ -89,15 +137,25 @@ func (g UnitGroup) Select(p func(u Unit) bool) UnitGroup {
 	return r
 }
 
-func (g UnitGroup) SelectType(t UnitType) UnitGroup {
+// Find searches for a unit matching the given UnitMatcher.
+func (g UnitGroup) Find(m UnitMatcher) (Unit, bool) { + for _, u := range g { + if m(u) { + return u, true + } + } + return nil, false +} + +func (g UnitGroup) SelectType(t UnitKind) UnitGroup { return g.Select(func(u Unit) bool { - return u.Type() == t + return u.Kind() == t }) } -func (g UnitGroup) SelectName(t UnitName) Unit { +func (g UnitGroup) SelectName(t UnitID) Unit { for _, u := range g { - if u.Name() == t { + if u.ID() == t { return u } } @@ -105,10 +163,10 @@ func (g UnitGroup) SelectName(t UnitName) Unit { return nil } -func (g UnitGroup) SelectNames(names ...UnitName) UnitGroup { +func (g UnitGroup) SelectNames(names ...UnitID) UnitGroup { return g.Select(func(u Unit) bool { for _, name := range names { - if u.Name() == name { + if u.ID() == name { return true } } @@ -127,16 +185,16 @@ func (g UnitGroup) Exclude(p func(u Unit) bool) UnitGroup { return r } -func (g UnitGroup) ExcludeType(t UnitType) UnitGroup { +func (g UnitGroup) ExcludeType(t UnitKind) UnitGroup { return g.Exclude(func(u Unit) bool { - return u.Type() == t + return u.Kind() == t }) } -func (g UnitGroup) ExcludeNames(names ...UnitName) UnitGroup { +func (g UnitGroup) ExcludeNames(names ...UnitID) UnitGroup { return g.Exclude(func(u Unit) bool { for _, name := range names { - if u.Name() == name { + if u.ID() == name { return true } } diff --git a/pkg/specter/unitloading_test.go b/pkg/specter/unitloading_test.go index 568d20c..da9dc7b 100644 --- a/pkg/specter/unitloading_test.go +++ b/pkg/specter/unitloading_test.go @@ -43,19 +43,19 @@ func TestNewUnitGroup(t *testing.T) { { name: "GIVEN multiple units WHEN calling NewUnitGroup THEN return a group with those units", given: []specter.Unit{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, when: func() specter.UnitGroup { return specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ) }, then: func(result specter.UnitGroup) bool { return len(result) == 2 && - result[0].Name() == "unit1" && - result[1].Name() == "unit2" + result[0].ID() == "unit1" && + result[1].ID() == "unit2" }, }, } @@ -81,28 +81,28 @@ func TestUnitGroup_Merge(t *testing.T) { { name: "GIVEN two disjoint groups THEN return a group with all units", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), ), when: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), then: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), }, { name: "GIVEN two groups with overlapping units THEN return a group without duplicates", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", 
"type", specter.Source{}), ), when: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), then: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), }, } @@ -125,7 +125,7 @@ func TestUnitGroup_Select(t *testing.T) { { name: "GIVEN no units matches, THEN return an empty group", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, when: func(u specter.Unit) bool { return false @@ -135,14 +135,14 @@ func TestUnitGroup_Select(t *testing.T) { { name: "GIVEN units matches, THEN return a group with only matching units", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, when: func(u specter.Unit) bool { - return u.Name() == "unit2" + return u.ID() == "unit2" }, then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, }, } @@ -158,13 +158,13 @@ func TestUnitGroup_SelectType(t *testing.T) { tests := []struct { name string given specter.UnitGroup - when specter.UnitType + when specter.UnitKind then specter.UnitGroup }{ { name: "GIVEN no units matches, THEN return an empty group", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, when: "not_found", then: specter.UnitGroup{}, @@ -172,12 +172,12 @@ func TestUnitGroup_SelectType(t *testing.T) { { name: "GIVEN a unit matches, THEN return a group with matching unit", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type1", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type2", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type1", specter.Source{}), + testutils.NewUnitStub("unit2", "type2", specter.Source{}), }, when: "type1", then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type1", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type1", specter.Source{}), }, }, } @@ -193,23 +193,23 @@ func TestUnitGroup_SelectName(t *testing.T) { tests := []struct { name string given specter.UnitGroup - when specter.UnitName + when specter.UnitID then specter.Unit }{ { name: "GIVEN a group with multiple units WHEN selecting an existing name THEN return the corresponding unit", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), when: "unit2", - then: &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: 
specter.Source{}}, + then: testutils.NewUnitStub("unit2", "type", specter.Source{}), }, { name: "GIVEN a group with multiple units WHEN selecting a non-existent name THEN return nil", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), when: "spec3", then: nil, @@ -234,26 +234,26 @@ func TestUnitGroup_SelectNames(t *testing.T) { tests := []struct { name string given specter.UnitGroup - when []specter.UnitName + when []specter.UnitID then specter.UnitGroup }{ { name: "GIVEN no units matches, THEN return a group with no values", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("name", "type", specter.Source{}), }, - when: []specter.UnitName{"not_found"}, + when: []specter.UnitID{"not_found"}, then: specter.UnitGroup{}, }, { name: "GIVEN a unit matches, THEN return a group with matching unit", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, - when: []specter.UnitName{"unit1"}, + when: []specter.UnitID{"unit1"}, then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), }, }, } @@ -275,20 +275,20 @@ func TestUnitGroup_Exclude(t *testing.T) { { name: "GIVEN no units matches, THEN return a group with the same values", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("name", "type", specter.Source{}), }, when: func(u specter.Unit) bool { return false }, then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("name", "type", specter.Source{}), }, }, { name: "GIVEN units matches, THEN return a group without matching units", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, when: func(u specter.Unit) bool { return true @@ -308,28 +308,28 @@ func TestUnitGroup_ExcludeType(t *testing.T) { tests := []struct { name string given specter.UnitGroup - when specter.UnitType + when specter.UnitKind then specter.UnitGroup }{ { name: "GIVEN no units matches, THEN return a group with the same values", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, when: "not_found", then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, }, { name: "GIVEN a unit matches, THEN return a group without matching unit", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type1", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type2", Src: 
specter.Source{}}, + testutils.NewUnitStub("unit1", "type1", specter.Source{}), + testutils.NewUnitStub("unit2", "type2", specter.Source{}), }, when: "type1", then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2", TypeName: "type2", Src: specter.Source{}}, + testutils.NewUnitStub("unit2", "type2", specter.Source{}), }, }, } @@ -345,28 +345,28 @@ func TestUnitGroup_ExcludeNames(t *testing.T) { tests := []struct { name string given specter.UnitGroup - when []specter.UnitName + when []specter.UnitID then specter.UnitGroup }{ { name: "GIVEN no units matches, THEN return a group with the same values", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, - when: []specter.UnitName{"not_found"}, + when: []specter.UnitID{"not_found"}, then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2name", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2name", "type", specter.Source{}), }, }, { name: "GIVEN a unit matches, THEN return a group without matching unit", given: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, - when: []specter.UnitName{"unit1"}, + when: []specter.UnitID{"unit1"}, then: specter.UnitGroup{ - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit2", "type", specter.Source{}), }, }, } @@ -388,11 +388,11 @@ func TestMapUnitGroup(t *testing.T) { { name: "GIVEN a group with multiple units WHEN mapped to their names THEN return a slice of unit names", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), when: func(u specter.Unit) string { - return string(u.Name()) + return string(u.ID()) }, then: []string{"unit1", "unit2"}, }, @@ -400,15 +400,15 @@ func TestMapUnitGroup(t *testing.T) { name: "GIVEN an empty group WHEN mapped THEN return a nil slice", given: specter.NewUnitGroup(), when: func(u specter.Unit) string { - return string(u.Name()) + return string(u.ID()) }, then: nil, }, { name: "GIVEN a group with multiple units WHEN mapped to a constant value THEN return a slice of that value", given: specter.NewUnitGroup( - &testutils.UnitStub{Name_: "unit1", TypeName: "type", Src: specter.Source{}}, - &testutils.UnitStub{Name_: "unit2", TypeName: "type", Src: specter.Source{}}, + testutils.NewUnitStub("unit1", "type", specter.Source{}), + testutils.NewUnitStub("unit2", "type", specter.Source{}), ), when: func(u specter.Unit) string { return "constant" @@ -424,3 +424,104 @@ func TestMapUnitGroup(t *testing.T) { }) } } + +func TestUnitGroup_Find(t *testing.T) { + type then struct { + unit specter.Unit + found bool + } + tests := []struct { + name string + given specter.UnitGroup + when specter.UnitMatcher + then then + }{ + { + name: "WHEN a unit matches THEN should return unit and true", + given: specter.UnitGroup{ + testutils.NewUnitStub("", "", specter.Source{}), + }, + when: func(u specter.Unit) bool { + return true + }, + then: then{ + unit: testutils.NewUnitStub("", "", 
specter.Source{}), + found: true, + }, + }, + { + name: "WHEN no unit matches THEN should return nil and false", + given: specter.UnitGroup{ + testutils.NewUnitStub("", "", specter.Source{}), + }, + when: func(u specter.Unit) bool { + return false + }, + then: then{ + unit: nil, + found: false, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, found := tt.given.Find(tt.when) + require.Equal(t, tt.then.found, found) + require.Equal(t, tt.then.unit, got) + }) + } +} + +func TestUnitOf(t *testing.T) { + t.Run("Attributes should be set to passed values", func(t *testing.T) { + value := "hello-world" + var unitID specter.UnitID = "unitID" + var unitKind specter.UnitKind = "kind" + unitSource := specter.Source{ + Location: "/path/to/file", + Data: []byte(`some data`), + Format: "txt", + } + u := specter.UnitOf(value, unitID, unitKind, unitSource) + require.NotNil(t, u) + require.Equal(t, unitID, u.ID()) + require.Equal(t, unitKind, u.Kind()) + require.Equal(t, unitSource, u.Source()) + require.Equal(t, value, u.Unwrap()) + }) +} + +func TestUnitWithKindMatcher(t *testing.T) { + type when struct { + kind specter.UnitKind + unit specter.Unit + } + tests := []struct { + name string + when when + then bool + }{ + { + name: "WHEN unit with kind THEN return true", + when: when{ + kind: "kind", + unit: testutils.NewUnitStub("unit1", "kind", specter.Source{}), + }, + then: true, + }, + { + name: "WHEN unit not with kind THEN return false", + when: when{ + kind: "kind", + unit: testutils.NewUnitStub("unit1", "not_kind", specter.Source{}), + }, + then: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := specter.UnitWithKindMatcher(tt.when.kind)(tt.when.unit) + assert.Equal(t, tt.then, got) + }) + } +} diff --git a/pkg/specter/unitproc.go b/pkg/specter/unitproc.go index 2aa6ff9..e2f7826 100644 --- a/pkg/specter/unitproc.go +++ b/pkg/specter/unitproc.go @@ -14,17 +14,18 @@ package specter -import "context" +import ( + "context" +) -type ProcessingContext struct { +type UnitProcessingContext struct { context.Context Units UnitGroup Artifacts []Artifact - Logger Logger } // Artifact returns an artifact by its ID. -func (c ProcessingContext) Artifact(id ArtifactID) Artifact { +func (c UnitProcessingContext) Artifact(id ArtifactID) Artifact { for _, o := range c.Artifacts { if o.ID() == id { return o @@ -41,20 +42,22 @@ type UnitProcessor interface { Name() string // Process processes a group of units. - Process(ctx ProcessingContext) ([]Artifact, error) + Process(ctx UnitProcessingContext) ([]Artifact, error) } -// ArtifactProcessor are services responsible for processing artifacts of UnitProcessors. -type ArtifactProcessor interface { - // Process performs the processing of artifacts generated by UnitProcessor. - Process(ctx ArtifactProcessingContext) error - - // Name returns the name of this processor. 
- Name() string -} - -func GetContextArtifact[T Artifact](ctx ProcessingContext, id ArtifactID) T { +func GetContextArtifact[T Artifact](ctx UnitProcessingContext, id ArtifactID) T { artifact := ctx.Artifact(id) v, _ := artifact.(T) return v } + +type UnitProcessorFunc struct { + name string + processFunc func(ctx UnitProcessingContext) ([]Artifact, error) +} + +func (a UnitProcessorFunc) Process(ctx UnitProcessingContext) ([]Artifact, error) { + return a.processFunc(ctx) +} + +func (a UnitProcessorFunc) Name() string { return a.name } diff --git a/pkg/specter/unitproc_test.go b/pkg/specter/unitproc_test.go index f7da664..fa31b36 100644 --- a/pkg/specter/unitproc_test.go +++ b/pkg/specter/unitproc_test.go @@ -16,6 +16,7 @@ package specter_test import ( "github.com/morebec/specter/pkg/specter" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "testing" ) @@ -67,7 +68,7 @@ func TestProcessorArtifactRegistry_FindByID(t *testing.T) { func TestGetContextArtifact(t *testing.T) { type when struct { - ctx specter.ProcessingContext + ctx specter.UnitProcessingContext id specter.ArtifactID } type then[T specter.Artifact] struct { @@ -82,7 +83,7 @@ func TestGetContextArtifact(t *testing.T) { { name: "GIVEN no artifact matches THEN return nil", when: when{ - ctx: specter.ProcessingContext{}, + ctx: specter.UnitProcessingContext{}, id: "not_found", }, then: then[*specter.FileArtifact]{ @@ -92,7 +93,7 @@ func TestGetContextArtifact(t *testing.T) { { name: "GIVEN artifact matches THEN return artifact", when: when{ - ctx: specter.ProcessingContext{ + ctx: specter.UnitProcessingContext{ Artifacts: []specter.Artifact{ &specter.FileArtifact{Path: "/path/to/file"}, }, @@ -111,3 +112,24 @@ func TestGetContextArtifact(t *testing.T) { }) } } + +func TestUnitProcessorFunc(t *testing.T) { + t.Run("Name should be set", func(t *testing.T) { + a := specter.NewUnitProcessorFunc("name", func(ctx specter.UnitProcessingContext) ([]specter.Artifact, error) { + return nil, nil + }) + require.Equal(t, "name", a.Name()) + }) + + t.Run("Process should be called", func(t *testing.T) { + called := false + a := specter.NewUnitProcessorFunc("name", func(ctx specter.UnitProcessingContext) ([]specter.Artifact, error) { + called = true + return nil, assert.AnError + }) + + _, err := a.Process(specter.UnitProcessingContext{}) + require.Error(t, err) + require.True(t, called) + }) +} diff --git a/pkg/specterutils/depresolve.go b/pkg/specterutils/depresolve.go index c00731d..f8546c4 100644 --- a/pkg/specterutils/depresolve.go +++ b/pkg/specterutils/depresolve.go @@ -21,10 +21,12 @@ import ( "strings" ) +const DependencyResolutionFailed = "specter.dependency_resolution_failed" + const ResolvedDependenciesArtifactID = "_resolved_dependencies" -// ResolvedDependencies represents an ordered list of Unit that should be processed in that specific order to avoid -// unresolved types. +// ResolvedDependencies represents an ordered list of Unit that should be +// processed in that specific order based on their dependencies. 
type ResolvedDependencies specter.UnitGroup func (r ResolvedDependencies) ID() specter.ArtifactID { @@ -33,7 +35,7 @@ func (r ResolvedDependencies) ID() specter.ArtifactID { type DependencyProvider interface { Supports(s specter.Unit) bool - Provide(s specter.Unit) []specter.UnitName + Provide(s specter.Unit) []specter.UnitID } var _ specter.UnitProcessor = DependencyResolutionProcessor{} @@ -50,9 +52,7 @@ func (p DependencyResolutionProcessor) Name() string { return "dependency_resolution_processor" } -func (p DependencyResolutionProcessor) Process(ctx specter.ProcessingContext) ([]specter.Artifact, error) { - ctx.Logger.Info("\nResolving dependencies...") - +func (p DependencyResolutionProcessor) Process(ctx specter.UnitProcessingContext) ([]specter.Artifact, error) { var nodes []dependencyNode for _, s := range ctx.Units { node := dependencyNode{Unit: s, Dependencies: nil} @@ -69,20 +69,19 @@ func (p DependencyResolutionProcessor) Process(ctx specter.ProcessingContext) ([ deps, err := newDependencyGraph(nodes...).resolve() if err != nil { - return nil, errors.WrapWithMessage(err, errors.InternalErrorCode, "failed resolving dependencies") + return nil, errors.WrapWithMessage(err, DependencyResolutionFailed, "failed resolving dependencies") } - ctx.Logger.Success("Dependencies resolved successfully.") return []specter.Artifact{deps}, nil } -func GetResolvedDependenciesFromContext(ctx specter.ProcessingContext) ResolvedDependencies { +func GetResolvedDependenciesFromContext(ctx specter.UnitProcessingContext) ResolvedDependencies { return specter.GetContextArtifact[ResolvedDependencies](ctx, ResolvedDependenciesArtifactID) } -type dependencySet map[specter.UnitName]struct{} +type dependencySet map[specter.UnitID]struct{} -func newDependencySet(dependencies ...specter.UnitName) dependencySet { +func newDependencySet(dependencies ...specter.UnitID) dependencySet { deps := dependencySet{} for _, d := range dependencies { deps[d] = struct{}{} @@ -109,8 +108,8 @@ type dependencyNode struct { Dependencies dependencySet } -func (d dependencyNode) UnitName() specter.UnitName { - return d.Unit.Name() +func (d dependencyNode) UnitName() specter.UnitID { + return d.Unit.ID() } type dependencyGraph []dependencyNode @@ -122,41 +121,42 @@ func newDependencyGraph(units ...dependencyNode) dependencyGraph { func (g dependencyGraph) resolve() (ResolvedDependencies, error) { var resolved ResolvedDependencies - // Look up of nodes to their typeName Names. - specByTypeNames := map[specter.UnitName]specter.Unit{} + // Look up of nodes to their IDs. + unitByID := map[specter.UnitID]specter.Unit{} // Map nodes to dependencies - dependenciesByTypeNames := map[specter.UnitName]dependencySet{} + dependenciesByID := map[specter.UnitID]dependencySet{} for _, n := range g { - specByTypeNames[n.UnitName()] = n.Unit - dependenciesByTypeNames[n.UnitName()] = n.Dependencies + unitByID[n.UnitName()] = n.Unit + dependenciesByID[n.UnitName()] = n.Dependencies } - // The algorithm simply processes all nodes and tries to find the ones that have no dependencies. - // When a node has dependencies, these dependencies are checked for being either circular or unresolvable. - // If no unresolvable or circular dependency is found, the node is considered resolved. - // And processing retries with the remaining dependent nodes. 
- for len(dependenciesByTypeNames) != 0 { - var typeNamesWithNoDependencies []specter.UnitName - for typeName, dependencies := range dependenciesByTypeNames { + // The algorithm simply processes all nodes and tries to find the ones that have + // no dependencies. When a node has dependencies, these dependencies are checked + // for being either circular or unresolvable. If no unresolvable or circular + // dependency is found, the node is considered resolved. And processing retries + // with the remaining dependent nodes. + for len(dependenciesByID) != 0 { + var idsWithNoDependencies []specter.UnitID + for id, dependencies := range dependenciesByID { if len(dependencies) == 0 { - typeNamesWithNoDependencies = append(typeNamesWithNoDependencies, typeName) + idsWithNoDependencies = append(idsWithNoDependencies, id) } } // If no nodes have no dependencies, in other words if all nodes have dependencies, // This means that we have a problem of circular dependencies. // We need at least one node in the graph to be independent for it to be potentially resolvable. - if len(typeNamesWithNoDependencies) == 0 { + if len(idsWithNoDependencies) == 0 { // We either have circular dependencies or an unresolved dependency // Check if all dependencies exist. - for typeName, dependencies := range dependenciesByTypeNames { + for id, dependencies := range dependenciesByID { for dependency := range dependencies { - if _, found := specByTypeNames[dependency]; !found { + if _, found := unitByID[dependency]; !found { return nil, errors.NewWithMessage( - errors.InternalErrorCode, - fmt.Sprintf("unit with type %q depends on an unresolved type %q", - typeName, + DependencyResolutionFailed, + fmt.Sprintf("unit %q depends on an unresolved kind %q", + id, dependency, ), ) @@ -166,12 +166,12 @@ func (g dependencyGraph) resolve() (ResolvedDependencies, error) { // They all exist, therefore, we have a circular dependencies. var circularDependencies []string - for k := range dependenciesByTypeNames { + for k := range dependenciesByID { circularDependencies = append(circularDependencies, string(k)) } return nil, errors.NewWithMessage( - errors.InternalErrorCode, + DependencyResolutionFailed, fmt.Sprintf( "circular dependencies found between nodes %q", strings.Join(circularDependencies, "\", \""), @@ -180,15 +180,15 @@ func (g dependencyGraph) resolve() (ResolvedDependencies, error) { } // All good, we can move the nodes that no longer have unresolved dependencies - for _, nodeTypeName := range typeNamesWithNoDependencies { - delete(dependenciesByTypeNames, nodeTypeName) - resolved = append(resolved, specByTypeNames[nodeTypeName]) + for _, nodeID := range idsWithNoDependencies { + delete(dependenciesByID, nodeID) + resolved = append(resolved, unitByID[nodeID]) } - // Remove the resolved nodes from the remaining dependenciesByTypeNames. - for typeName, dependencies := range dependenciesByTypeNames { - diff := dependencies.diff(newDependencySet(typeNamesWithNoDependencies...)) - dependenciesByTypeNames[typeName] = diff + // Remove the resolved nodes from the remaining dependenciesByID. + for ID, dependencies := range dependenciesByID { + diff := dependencies.diff(newDependencySet(idsWithNoDependencies...)) + dependenciesByID[ID] = diff } } @@ -201,7 +201,7 @@ func (g dependencyGraph) resolve() (ResolvedDependencies, error) { // to easily resolve dependencies. 
type HasDependencies interface { specter.Unit - Dependencies() []specter.UnitName + Dependencies() []specter.UnitID } type HasDependenciesProvider struct{} @@ -211,7 +211,7 @@ func (h HasDependenciesProvider) Supports(u specter.Unit) bool { return ok } -func (h HasDependenciesProvider) Provide(u specter.Unit) []specter.UnitName { +func (h HasDependenciesProvider) Provide(u specter.Unit) []specter.UnitID { d, ok := u.(HasDependencies) if !ok { return nil diff --git a/pkg/specterutils/depresolve_test.go b/pkg/specterutils/depresolve_test.go index f6ee0a7..cecafe4 100644 --- a/pkg/specterutils/depresolve_test.go +++ b/pkg/specterutils/depresolve_test.go @@ -21,21 +21,20 @@ import ( "github.com/morebec/specter/pkg/testutils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "os" "testing" ) // MockDependencyProvider is a mock implementation of DependencyProvider for testing. type MockDependencyProvider struct { supportFunc func(specter.Unit) bool - provideFunc func(specter.Unit) []specter.UnitName + provideFunc func(specter.Unit) []specter.UnitID } func (m *MockDependencyProvider) Supports(u specter.Unit) bool { return m.supportFunc(u) } -func (m *MockDependencyProvider) Provide(u specter.Unit) []specter.UnitName { +func (m *MockDependencyProvider) Provide(u specter.Unit) []specter.UnitID { return m.provideFunc(u) } @@ -69,17 +68,17 @@ func TestDependencyResolutionProcessor_Process(t *testing.T) { supportFunc: func(_ specter.Unit) bool { return false }, - provideFunc: func(_ specter.Unit) []specter.UnitName { + provideFunc: func(_ specter.Unit) []specter.UnitID { return nil }, }, &MockDependencyProvider{ supportFunc: func(u specter.Unit) bool { - return u.Type() == "type" + return u.Kind() == "type" }, - provideFunc: func(u specter.Unit) []specter.UnitName { - if u.Name() == "unit1" { - return []specter.UnitName{"unit2"} + provideFunc: func(u specter.Unit) []specter.UnitID { + if u.ID() == "unit1" { + return []specter.UnitID{"unit2"} } return nil }, @@ -102,13 +101,13 @@ func TestDependencyResolutionProcessor_Process(t *testing.T) { providers: []specterutils.DependencyProvider{ &MockDependencyProvider{ supportFunc: func(u specter.Unit) bool { - return u.Type() == "type" + return u.Kind() == "type" }, - provideFunc: func(u specter.Unit) []specter.UnitName { - if u.Name() == "unit1" { - return []specter.UnitName{"unit2"} - } else if u.Name() == "unit2" { - return []specter.UnitName{"unit1"} + provideFunc: func(u specter.Unit) []specter.UnitID { + if u.ID() == "unit1" { + return []specter.UnitID{"unit2"} + } else if u.ID() == "unit2" { + return []specter.UnitID{"unit1"} } return nil }, @@ -128,10 +127,10 @@ func TestDependencyResolutionProcessor_Process(t *testing.T) { providers: []specterutils.DependencyProvider{ &MockDependencyProvider{ supportFunc: func(u specter.Unit) bool { - return u.Type() == "type" + return u.Kind() == "type" }, - provideFunc: func(u specter.Unit) []specter.UnitName { - return []specter.UnitName{"spec3"} + provideFunc: func(u specter.Unit) []specter.UnitID { + return []specter.UnitID{"spec3"} }, }, }, @@ -141,19 +140,15 @@ func TestDependencyResolutionProcessor_Process(t *testing.T) { }, }, then: nil, - expectedError: errors.NewWithMessage(errors.InternalErrorCode, "depends on an unresolved type \"spec3\""), + expectedError: errors.NewWithMessage(specterutils.DependencyResolutionFailed, "depends on an unresolved kind \"spec3\""), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { processor := 
specterutils.NewDependencyResolutionProcessor(tt.given.providers...) - ctx := specter.ProcessingContext{ + ctx := specter.UnitProcessingContext{ Units: tt.given.units, - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{ - DisableColors: true, - Writer: os.Stdout, - }), } var err error @@ -176,12 +171,12 @@ func TestDependencyResolutionProcessor_Process(t *testing.T) { func TestGetResolvedDependenciesFromContext(t *testing.T) { tests := []struct { name string - given specter.ProcessingContext + given specter.UnitProcessingContext want specterutils.ResolvedDependencies }{ { name: "GIVEN a context with resolved dependencies THEN return resolved dependencies", - given: specter.ProcessingContext{ + given: specter.UnitProcessingContext{ Artifacts: []specter.Artifact{ specterutils.ResolvedDependencies{ specterutils.NewGenericUnit("name", "type", specter.Source{}), @@ -194,7 +189,7 @@ func TestGetResolvedDependenciesFromContext(t *testing.T) { }, { name: "GIVEN a context with resolved dependencies with wrong type THEN return nil", - given: specter.ProcessingContext{ + given: specter.UnitProcessingContext{ Artifacts: []specter.Artifact{ testutils.NewArtifactStub(specterutils.ResolvedDependenciesArtifactID), }, @@ -203,7 +198,7 @@ func TestGetResolvedDependenciesFromContext(t *testing.T) { }, { name: "GIVEN a context without resolved dependencies THEN return nil", - given: specter.ProcessingContext{}, + given: specter.UnitProcessingContext{}, want: nil, }, } @@ -222,14 +217,14 @@ func TestDependencyResolutionProcessor_Name(t *testing.T) { type hasDependencyUnit struct { source specter.Source - dependencies []specter.UnitName + dependencies []specter.UnitID } -func (h *hasDependencyUnit) Name() specter.UnitName { +func (h *hasDependencyUnit) ID() specter.UnitID { return "unit" } -func (h *hasDependencyUnit) Type() specter.UnitType { +func (h *hasDependencyUnit) Kind() specter.UnitKind { return "unit" } @@ -241,11 +236,7 @@ func (h *hasDependencyUnit) Source() specter.Source { return h.source } -func (h *hasDependencyUnit) SetSource(s specter.Source) { - h.source = s -} - -func (h *hasDependencyUnit) Dependencies() []specter.UnitName { +func (h *hasDependencyUnit) Dependencies() []specter.UnitID { return h.dependencies } @@ -278,7 +269,7 @@ func TestHasDependenciesProvider_Provide(t *testing.T) { tests := []struct { name string given specter.Unit - then []specter.UnitName + then []specter.UnitID }{ { name: "GIVEN unit not implementing HasDependencies THEN return nil", @@ -287,8 +278,8 @@ func TestHasDependenciesProvider_Provide(t *testing.T) { }, { name: "GIVEN unit implementing HasDependencies THEN return dependencies", - given: &hasDependencyUnit{dependencies: []specter.UnitName{"unit1"}}, - then: []specter.UnitName{"unit1"}, + given: &hasDependencyUnit{dependencies: []specter.UnitID{"unit1"}}, + then: []specter.UnitID{"unit1"}, }, } for _, tt := range tests { diff --git a/pkg/specterutils/genericunit.go b/pkg/specterutils/genericunit.go index 5dfea06..4f33f66 100644 --- a/pkg/specterutils/genericunit.go +++ b/pkg/specterutils/genericunit.go @@ -23,18 +23,14 @@ import ( // GenericUnit is a generic implementation of a Unit that saves its attributes in a list of attributes for introspection. // these can be useful for loaders that are looser in what they allow. 
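As a rough sketch of the `GenericUnit` API after the `UnitID`/`UnitKind` rename (the type is defined immediately below), the following builds a unit by hand and reads its attributes back. The `web`/`service`/`nginx:latest` values are made-up illustration data, not taken from the library.

```go
package example

import (
	"fmt"

	"github.com/morebec/specter/pkg/specter"
	"github.com/morebec/specter/pkg/specterutils"
	"github.com/zclconf/go-cty/cty"
)

func genericUnitExample() {
	unit := specterutils.NewGenericUnit("web", "service", specter.Source{Location: "services.hcl"})

	// Attributes are kept as a flat list of name/value pairs backed by cty values.
	unit.Attributes = append(unit.Attributes,
		specterutils.GenericUnitAttribute{
			Name:  "description",
			Value: specterutils.GenericValue{Value: cty.StringVal("The public web frontend.")},
		},
		specterutils.GenericUnitAttribute{
			Name:  "image",
			Value: specterutils.GenericValue{Value: cty.StringVal("nginx:latest")},
		},
	)

	fmt.Println(unit.ID(), unit.Kind())     // web service
	fmt.Println(unit.HasAttribute("image")) // true

	if attr := unit.Attribute("image"); attr != nil {
		fmt.Println(attr.Name) // image
	}

	// Description() reads the "description" attribute when one is present.
	fmt.Println(unit.Description())
}
```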
type GenericUnit struct { - UnitName specter.UnitName - typ specter.UnitType + UnitID specter.UnitID + typ specter.UnitKind source specter.Source Attributes []GenericUnitAttribute } -func NewGenericUnit(name specter.UnitName, typ specter.UnitType, source specter.Source) *GenericUnit { - return &GenericUnit{UnitName: name, typ: typ, source: source} -} - -func (u *GenericUnit) SetSource(src specter.Source) { - u.source = src +func NewGenericUnit(name specter.UnitID, typ specter.UnitKind, source specter.Source) *GenericUnit { + return &GenericUnit{UnitID: name, typ: typ, source: source} } func (u *GenericUnit) Description() string { @@ -46,11 +42,11 @@ func (u *GenericUnit) Description() string { return attr.Value.String() } -func (u *GenericUnit) Name() specter.UnitName { - return u.UnitName +func (u *GenericUnit) ID() specter.UnitID { + return u.UnitID } -func (u *GenericUnit) Type() specter.UnitType { +func (u *GenericUnit) Kind() specter.UnitKind { return u.typ } @@ -58,7 +54,7 @@ func (u *GenericUnit) Source() specter.Source { return u.source } -// Attribute returns an attribute by its FilePath or nil if it was not found. +// Attribute returns an attribute by its name or nil if it was not found. func (u *GenericUnit) Attribute(name string) *GenericUnitAttribute { for _, a := range u.Attributes { if a.Name == name { @@ -82,11 +78,6 @@ func (u *GenericUnit) HasAttribute(name string) bool { // AttributeType represents the type of attribute type AttributeType string -const ( - // Unknown is used for attributes where the actual type is unknown. - Unknown = "any" -) - // GenericUnitAttribute represents an attribute of a unit. // It relies on cty.Value to represent the loaded value. type GenericUnitAttribute struct { @@ -123,5 +114,5 @@ type ObjectValue struct { } func (o ObjectValue) String() string { - return fmt.Sprintf("ObjectValue{Type: %s, Attributes: %v}", o.Type, o.Attributes) + return fmt.Sprintf("ObjectValue{Kind: %s, Attributes: %v}", o.Type, o.Attributes) } diff --git a/pkg/specterutils/genericunit_test.go b/pkg/specterutils/genericunit_test.go index 17f6b01..814b06c 100644 --- a/pkg/specterutils/genericunit_test.go +++ b/pkg/specterutils/genericunit_test.go @@ -15,7 +15,6 @@ package specterutils_test import ( - "github.com/morebec/specter/pkg/specter" "github.com/morebec/specter/pkg/specterutils" "github.com/stretchr/testify/assert" "github.com/zclconf/go-cty/cty" @@ -148,32 +147,9 @@ func TestGenericUnit_HasAttribute(t *testing.T) { } } -func TestGenericUnit_SetSource(t *testing.T) { - tests := []struct { - name string - given *specterutils.GenericUnit - when specter.Source - then specter.Source - }{ - { - name: "GIVEN a unit WHEN SetSource is called THEN updates the source", - given: specterutils.NewGenericUnit("name", "type", specter.Source{Location: "initial/path"}), - when: specter.Source{Location: "new/path"}, - then: specter.Source{Location: "new/path"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.given.SetSource(tt.when) - assert.Equal(t, tt.then, tt.given.Source()) - }) - } -} - func TestObjectValue_String(t *testing.T) { o := specterutils.ObjectValue{Type: "hello", Attributes: []specterutils.GenericUnitAttribute{ {Name: "hello", Value: specterutils.GenericValue{Value: cty.StringVal("world")}}, }} - assert.Equal(t, "ObjectValue{Type: hello, Attributes: [{hello world}]}", o.String()) + assert.Equal(t, "ObjectValue{Kind: hello, Attributes: [{hello world}]}", o.String()) } diff --git a/pkg/specterutils/hcl.go b/pkg/specterutils/hcl.go 
index 9818712..79b6ec7 100644 --- a/pkg/specterutils/hcl.go +++ b/pkg/specterutils/hcl.go @@ -100,15 +100,15 @@ func (l HCLGenericUnitLoader) Load(s specter.Source) ([]specter.Unit, error) { // u.Location, // block.Range().Start.Line, // block.Range().Start.Column, - // block.Type, + // block.Kind, // ), //) } // Create unit and add to list units = append(units, &GenericUnit{ - UnitName: specter.UnitName(block.Labels[0]), - typ: specter.UnitType(block.Type), + UnitID: specter.UnitID(block.Labels[0]), + typ: specter.UnitKind(block.Type), source: s, Attributes: specAttributes, }) @@ -172,7 +172,7 @@ type HCLUnitLoaderFileConfigurationProvider func() HCLFileConfig // HCLFileConfig interface that is to be implemented to define the structure of HCL unit files. type HCLFileConfig interface { - Units() []specter.Unit + Units(specter.Source) []specter.Unit } // HCLVariableConfig represents a block configuration that allows defining variables. @@ -221,7 +221,7 @@ func (l HCLUnitLoader) Load(s specter.Source) ([]specter.Unit, error) { // //body := parsedFile.Body.(*hclsyntax.Body) //for _, b := range body.Blocks { - // if b.Type == "const" { + // if b.Kind == "const" { // v, d := b.Body.Attributes["value"].Expr.Value(ctx) // if d != nil && d.HasErrors() { // diags = append(diags, d...) @@ -243,12 +243,7 @@ func (l HCLUnitLoader) Load(s specter.Source) ([]specter.Unit, error) { return nil, errors.Wrap(err, InvalidHCLErrorCode) } - // Set source for all units - units := fileConf.Units() - for _, sp := range units { - sp.SetSource(s) - } - return units, nil + return fileConf.Units(s), nil } func (l HCLUnitLoader) SupportsSource(s specter.Source) bool { diff --git a/pkg/specterutils/hcl_test.go b/pkg/specterutils/hcl_test.go index d197081..90e7746 100644 --- a/pkg/specterutils/hcl_test.go +++ b/pkg/specterutils/hcl_test.go @@ -68,7 +68,7 @@ func TestHCLGenericUnitLoader_Load(t *testing.T) { expectedError require.ErrorAssertionFunc } - mockFile := HclConfigMock{} + hclFileStub := HclConfigMock{} tests := []struct { name string @@ -89,13 +89,13 @@ func TestHCLGenericUnitLoader_Load(t *testing.T) { }, }, { - name: "WHEN a valid hcl file THEN the specs should be returned an no error", + name: "WHEN a valid hcl source THEN the units should be returned without any error", when: when{ - source: mockFile.source(), + source: hclFileStub.source(), }, then: then{ expectedUnits: []specter.Unit{ - mockFile.genericUnit(), + hclFileStub.expectedUnit(hclFileStub.source()), }, expectedError: require.NoError, }, @@ -192,7 +192,10 @@ specType "specName" { require.NoError(t, err) } - assert.Equal(t, tt.then.expectedUnits, actualUnits) + require.Len(t, actualUnits, len(tt.then.expectedUnits)) + for i := range tt.then.expectedUnits { + assert.Equal(t, tt.then.expectedUnits[i], actualUnits[i]) + } }) } } @@ -216,7 +219,7 @@ func TestHCLUnitLoader_Load(t *testing.T) { then then }{ { - name: "WHEN an empty file THEN return nil", + name: "WHEN an empty source THEN return nil", when: when{ source: specter.Source{ Format: specterutils.HCLSourceFormat, @@ -229,7 +232,7 @@ func TestHCLUnitLoader_Load(t *testing.T) { }, }, { - name: "WHEN an unsupported file format THEN an error should be returned", + name: "WHEN an unsupported source format THEN an error should be returned", when: when{ source: specter.Source{ Format: "txt", @@ -241,11 +244,11 @@ func TestHCLUnitLoader_Load(t *testing.T) { }, }, { - name: "WHEN an unparsable hcl file THEN an error should be returned", + name: "WHEN an unparsable hcl source THEN an error should be 
returned", when: when{ source: specter.Source{ Data: []byte(` -con st = var o +con st = var o # this cannot be parsed. `), Format: specterutils.HCLSourceFormat, }, @@ -256,13 +259,13 @@ con st = var o }, }, { - name: "WHEN valid hcl file THEN return units", + name: "WHEN valid hcl source THEN return units", when: when{ source: mockFile.source(), }, then: then{ expectedUnits: []specter.Unit{ - mockFile.genericUnit(), + mockFile.expectedUnit(mockFile.source()), }, }, }, @@ -298,7 +301,13 @@ type HclConfigMock struct { } `hcl:"service,block"` } -func (m *HclConfigMock) data() []byte { +func (m *HclConfigMock) Units(s specter.Source) []specter.Unit { + return []specter.Unit{ + m.expectedUnit(s), + } +} + +func (m *HclConfigMock) ContentBytes() []byte { return []byte(` service "specter" { image = "specter:1.0.0" @@ -309,14 +318,8 @@ service "specter" { `) } -func (m *HclConfigMock) Units() []specter.Unit { - return []specter.Unit{ - m.genericUnit(), - } -} - -func (m *HclConfigMock) genericUnit() *specterutils.GenericUnit { - unit := specterutils.NewGenericUnit("specter", "service", m.source()) +func (m *HclConfigMock) expectedUnit(s specter.Source) *specterutils.GenericUnit { + unit := specterutils.NewGenericUnit("specter", "service", s) unit.Attributes = append(unit.Attributes, specterutils.GenericUnitAttribute{ Name: "image", @@ -343,8 +346,8 @@ func (m *HclConfigMock) genericUnit() *specterutils.GenericUnit { func (m *HclConfigMock) source() specter.Source { return specter.Source{ - Location: "specter.hcl", - Data: m.data(), Format: specterutils.HCLSourceFormat, + Location: "/path/to/file.hcl", + Data: m.ContentBytes(), } } diff --git a/pkg/specterutils/linting.go b/pkg/specterutils/linting.go index ec28bce..a15a4d0 100644 --- a/pkg/specterutils/linting.go +++ b/pkg/specterutils/linting.go @@ -18,14 +18,14 @@ import ( "fmt" "github.com/morebec/go-errors/errors" "github.com/morebec/specter/pkg/specter" + "io" "strings" - "unicode" ) const LinterResultArtifactID = "_linting_processor_results" -// UndefinedUnitName constant used to test against undefined UnitName. -const UndefinedUnitName specter.UnitName = "" +// UndefinedUnitID constant used to test against undefined specter.UnitID. +const UndefinedUnitID specter.UnitID = "" const LintingErrorCode = "specter.spec_processing.linting_error" @@ -38,6 +38,7 @@ const ( type LintingProcessor struct { linters []UnitLinter + Logger specter.Logger } func NewLintingProcessor(linters ...UnitLinter) *LintingProcessor { @@ -48,9 +49,12 @@ func (l LintingProcessor) Name() string { return "linting_processor" } -func (l LintingProcessor) Process(ctx specter.ProcessingContext) (artifacts []specter.Artifact, err error) { +func (l LintingProcessor) Process(ctx specter.UnitProcessingContext) (artifacts []specter.Artifact, err error) { + if l.Logger == nil { + l.Logger = specter.NewDefaultLogger(specter.DefaultLoggerConfig{Writer: io.Discard}) + } + linter := CompositeUnitLinter(l.linters...) 
- ctx.Logger.Info("\nLinting units ...") lr := linter.Lint(ctx.Units) @@ -58,26 +62,26 @@ func (l LintingProcessor) Process(ctx specter.ProcessingContext) (artifacts []sp if lr.HasWarnings() { for _, w := range lr.Warnings() { - ctx.Logger.Warning(fmt.Sprintf("Warning: %s\n", w.Message)) + l.Logger.Warning(fmt.Sprintf("Warning: %s\n", w.Message)) } } if lr.HasErrors() { for _, e := range lr.Errors().Errors { - ctx.Logger.Error(fmt.Sprintf("Error: %s\n", e.Error())) + l.Logger.Error(fmt.Sprintf("Error: %s\n", e.Error())) } err = lr.Errors() } if !lr.HasWarnings() && !lr.HasErrors() { - ctx.Logger.Success("Units linted successfully.") + l.Logger.Success("Units linted successfully.") } return artifacts, err } -func GetLintingResultsFromContext(ctx specter.ProcessingContext) LinterResultSet { +func GetLintingResultsFromContext(ctx specter.UnitProcessingContext) LinterResultSet { return specter.GetContextArtifact[LinterResultSet](ctx, LinterResultArtifactID) } @@ -155,16 +159,16 @@ func CompositeUnitLinter(linters ...UnitLinter) UnitLinterFunc { } } -// UnitMustNotHaveUndefinedNames ensures that no unit has an undefined name -func UnitMustNotHaveUndefinedNames(severity LinterResultSeverity) UnitLinterFunc { +// UnitsMustHaveIDs ensures that no unit has an undefined ID. +func UnitsMustHaveIDs(severity LinterResultSeverity) UnitLinterFunc { return func(units specter.UnitGroup) LinterResultSet { var result LinterResultSet for _, u := range units { - if u.Name() == UndefinedUnitName { + if u.ID() == UndefinedUnitID { result = append(result, LinterResult{ Severity: severity, - Message: fmt.Sprintf("unit at %q has an undefined name", u.Source().Location), + Message: fmt.Sprintf("a unit of kind %q has no ID at %q", u.Kind(), u.Source().Location), }) } } @@ -173,24 +177,25 @@ func UnitMustNotHaveUndefinedNames(severity LinterResultSeverity) UnitLinterFunc } } -// UnitsMustHaveUniqueNames ensures that names are unique amongst units. -func UnitsMustHaveUniqueNames(severity LinterResultSeverity) UnitLinterFunc { +// UnitsIDsMustBeUnique ensures that units all have unique IDs. +func UnitsIDsMustBeUnique(severity LinterResultSeverity) UnitLinterFunc { return func(units specter.UnitGroup) LinterResultSet { var result LinterResultSet - // Where key is the type FilePath and the array contains all the unit file locations where it was encountered. - encounteredNames := map[specter.UnitName][]string{} + // Where key is the type ID and the array contains all the unit file locations where it was encountered. 
+ // TODO simplify + encounteredIDs := map[specter.UnitID][]string{} for _, u := range units { - if _, found := encounteredNames[u.Name()]; found { - encounteredNames[u.Name()] = append(encounteredNames[u.Name()], u.Source().Location) + if _, found := encounteredIDs[u.ID()]; found { + encounteredIDs[u.ID()] = append(encounteredIDs[u.ID()], u.Source().Location) } else { - encounteredNames[u.Name()] = []string{u.Source().Location} + encounteredIDs[u.ID()] = []string{u.Source().Location} } } - for name, files := range encounteredNames { + for id, files := range encounteredIDs { if len(files) > 1 { // Deduplicate fnMap := map[string]struct{}{} @@ -205,8 +210,8 @@ func UnitsMustHaveUniqueNames(severity LinterResultSeverity) UnitLinterFunc { result = append(result, LinterResult{ Severity: severity, Message: fmt.Sprintf( - "duplicate unit name detected for %q in the following file(s): %s", - name, + "duplicate unit ID detected %q in the following file(s): %s", + id, strings.Join(fileNames, ", "), ), }) @@ -216,55 +221,3 @@ func UnitsMustHaveUniqueNames(severity LinterResultSeverity) UnitLinterFunc { return result } } - -// UnitsMustHaveDescriptionAttribute ensures that all units have a description. -func UnitsMustHaveDescriptionAttribute(severity LinterResultSeverity) UnitLinterFunc { - return func(units specter.UnitGroup) LinterResultSet { - var result LinterResultSet - for _, u := range units { - if u.Description() == "" { - result = append(result, LinterResult{ - Severity: severity, - Message: fmt.Sprintf("unit %q at location %q does not have a description", u.Name(), u.Source().Location), - }) - } - } - return result - } -} - -// UnitsDescriptionsMustStartWithACapitalLetter ensures that unit descriptions start with a capital letter. -func UnitsDescriptionsMustStartWithACapitalLetter(severity LinterResultSeverity) UnitLinterFunc { - return func(units specter.UnitGroup) LinterResultSet { - var result LinterResultSet - for _, u := range units { - if u.Description() != "" { - firstLetter := rune(u.Description()[0]) - if unicode.IsUpper(firstLetter) { - continue - } - } - result = append(result, LinterResult{ - Severity: severity, - Message: fmt.Sprintf("the description of unit %q at location %q does not start with a capital letter", u.Name(), u.Source().Location), - }) - } - return result - } -} - -// UnitsDescriptionsMustEndWithPeriod ensures that unit descriptions end with a period. 
-func UnitsDescriptionsMustEndWithPeriod(severity LinterResultSeverity) UnitLinterFunc { - return func(units specter.UnitGroup) LinterResultSet { - var result LinterResultSet - for _, u := range units { - if !strings.HasSuffix(u.Description(), ".") { - result = append(result, LinterResult{ - Severity: severity, - Message: fmt.Sprintf("the description of unit %q at location %q does not end with a period", u.Name(), u.Source().Location), - }) - } - } - return result - } -} diff --git a/pkg/specterutils/linting_test.go b/pkg/specterutils/linting_test.go index 5aad69f..dfcf8aa 100644 --- a/pkg/specterutils/linting_test.go +++ b/pkg/specterutils/linting_test.go @@ -25,171 +25,7 @@ import ( "testing" ) -func TestUnitsDescriptionsMustStartWithACapitalLetter(t *testing.T) { - tests := []struct { - name string - given specter.UnitGroup - then specterutils.LinterResultSet - }{ - { - name: "GIVEN unit starting with an upper case letter THEN return empty result set", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("It starts with UPPERCASE")}, - }, - }, - }, - }, - }, - { - name: "GIVEN unit starting with lower case letter THEN return error", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("it starts with lowercase")}, - }, - }, - }, - }, - then: specterutils.LinterResultSet{ - { - Severity: specterutils.ErrorSeverity, - Message: "the description of unit \"test\" at location \"\" does not start with a capital letter", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - linter := specterutils.UnitsDescriptionsMustStartWithACapitalLetter(specterutils.ErrorSeverity) - result := linter.Lint(tt.given) - require.Equal(t, tt.then, result) - }) - } -} - -func TestUnitsDescriptionsMustEndWithPeriod(t *testing.T) { - tests := []struct { - name string - given specter.UnitGroup - then specterutils.LinterResultSet - }{ - { - name: "GIVEN unit ending with period THEN return empty result set", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("it ends with period.")}, - }, - }, - }, - }, - }, - { - name: "GIVEN unit not ending with period THEN return error", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("it starts with lowercase")}, - }, - }, - }, - }, - then: specterutils.LinterResultSet{ - { - Severity: specterutils.ErrorSeverity, - Message: "the description of unit \"test\" at location \"\" does not end with a period", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - linter := specterutils.UnitsDescriptionsMustEndWithPeriod(specterutils.ErrorSeverity) - result := linter.Lint(tt.given) - require.Equal(t, tt.then, result) - }) - } -} - -func TestUnitsMustHaveDescriptionAttribute(t *testing.T) { - tests := []struct { - name string - given specter.UnitGroup - then specterutils.LinterResultSet - }{ - { - name: "GIVEN unit with a description THEN return empty result set", - given: specter.UnitGroup{ - 
&specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("I have a description")}, - }, - }, - }, - }, - }, - { - name: "GIVEN unit with no description ", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - }, - }, - then: specterutils.LinterResultSet{ - { - Severity: specterutils.ErrorSeverity, - Message: "unit \"test\" at location \"\" does not have a description", - }, - }, - }, - { - name: "GIVEN unit with an empty description THEN return error", - given: specter.UnitGroup{ - &specterutils.GenericUnit{ - UnitName: "test", - Attributes: []specterutils.GenericUnitAttribute{ - { - Name: "description", - Value: specterutils.GenericValue{Value: cty.StringVal("")}, - }, - }, - }, - }, - then: specterutils.LinterResultSet{ - { - Severity: specterutils.ErrorSeverity, - Message: "unit \"test\" at location \"\" does not have a description", - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - linter := specterutils.UnitsMustHaveDescriptionAttribute(specterutils.ErrorSeverity) - result := linter.Lint(tt.given) - require.Equal(t, tt.then, result) - }) - } -} - -func TestUnitsMustHaveUniqueNames(t *testing.T) { +func TestUnitsIDsMustBeUnique(t *testing.T) { tests := []struct { name string given specter.UnitGroup @@ -199,41 +35,41 @@ func TestUnitsMustHaveUniqueNames(t *testing.T) { name: "GIVEN units with unique names THEN return empty result set", given: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "test", + UnitID: "test", }, &specterutils.GenericUnit{ - UnitName: "test2", + UnitID: "test2", }, }, }, { - name: "GIVEN units with non-unique names THEN return error", + name: "GIVEN units with non-unique IDs THEN return error", given: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "test", + UnitID: "test", }, &specterutils.GenericUnit{ - UnitName: "test", + UnitID: "test", }, }, then: specterutils.LinterResultSet{ { Severity: specterutils.ErrorSeverity, - Message: "duplicate unit name detected for \"test\" in the following file(s): ", + Message: "duplicate unit ID detected \"test\" in the following file(s): ", }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - linter := specterutils.UnitsMustHaveUniqueNames(specterutils.ErrorSeverity) + linter := specterutils.UnitsIDsMustBeUnique(specterutils.ErrorSeverity) result := linter.Lint(tt.given) require.Equal(t, tt.then, result) }) } } -func TestUnitMustNotHaveUndefinedNames(t *testing.T) { +func TestUnitsMustHaveIDs(t *testing.T) { tests := []struct { name string given specter.UnitGroup @@ -243,7 +79,7 @@ func TestUnitMustNotHaveUndefinedNames(t *testing.T) { name: "GIVEN unit with a name THEN return empty result set", given: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "test", + UnitID: "test", }, }, }, @@ -251,7 +87,7 @@ name: "GIVEN unit with no name THEN return error ", given: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "", + UnitID: "", }, }, then: specterutils.LinterResultSet{ @@ -264,7 +100,7 @@ } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - linter := specterutils.UnitMustNotHaveUndefinedNames(specterutils.ErrorSeverity) + linter := specterutils.UnitsMustHaveIDs(specterutils.ErrorSeverity) result := linter.Lint(tt.given) require.Equal(t, tt.then, result) }) @@ -285,13
+121,12 @@ func TestCompositeUnitLinter(t *testing.T) { name: "GIVEN valid units THEN return empty result set", given: args{ linters: []specterutils.UnitLinter{ - specterutils.UnitMustNotHaveUndefinedNames(specterutils.ErrorSeverity), - specterutils.UnitsDescriptionsMustStartWithACapitalLetter(specterutils.ErrorSeverity), - specterutils.UnitsDescriptionsMustEndWithPeriod(specterutils.ErrorSeverity), + specterutils.UnitsMustHaveIDs(specterutils.ErrorSeverity), + specterutils.UnitsIDsMustBeUnique(specterutils.ErrorSeverity), }, units: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "test", + UnitID: "test", Attributes: []specterutils.GenericUnitAttribute{ { Name: "description", @@ -306,29 +141,18 @@ func TestCompositeUnitLinter(t *testing.T) { name: "GIVEN invalid units THEN return empty result set", given: args{ linters: []specterutils.UnitLinter{ - specterutils.UnitMustNotHaveUndefinedNames(specterutils.ErrorSeverity), - specterutils.UnitsDescriptionsMustStartWithACapitalLetter(specterutils.ErrorSeverity), - specterutils.UnitsDescriptionsMustEndWithPeriod(specterutils.ErrorSeverity), + specterutils.UnitsMustHaveIDs(specterutils.ErrorSeverity), }, units: specter.UnitGroup{ &specterutils.GenericUnit{ - UnitName: "", + UnitID: "", // invalid because of ID }, }, }, then: specterutils.LinterResultSet{ { Severity: specterutils.ErrorSeverity, - Message: "unit at \"\" has an undefined name", - }, - { - Severity: specterutils.ErrorSeverity, - Message: "the description of unit \"\" at location \"\" does not start with a capital letter", - }, - - { - Severity: specterutils.ErrorSeverity, - Message: "the description of unit \"\" at location \"\" does not end with a period", + Message: "a unit of kind \"\" has no ID at \"\"", }, }, }, @@ -481,7 +305,7 @@ func TestLintingProcessor_Name(t *testing.T) { func TestLintingProcessor_Process(t *testing.T) { type args struct { linters []specterutils.UnitLinter - ctx specter.ProcessingContext + ctx specter.UnitProcessingContext } tests := []struct { name string @@ -493,9 +317,7 @@ func TestLintingProcessor_Process(t *testing.T) { name: "GIVEN an empty processing context", given: args{ linters: nil, - ctx: specter.ProcessingContext{ - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{}), - }, + ctx: specter.UnitProcessingContext{}, }, then: []specter.Artifact{ specterutils.LinterResultSet(nil), @@ -510,9 +332,8 @@ func TestLintingProcessor_Process(t *testing.T) { return specterutils.LinterResultSet{{Severity: specterutils.WarningSeverity, Message: "a warning"}} }), }, - ctx: specter.ProcessingContext{ - Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{}), + ctx: specter.UnitProcessingContext{ + Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, }, }, then: []specter.Artifact{ @@ -527,9 +348,8 @@ func TestLintingProcessor_Process(t *testing.T) { return specterutils.LinterResultSet{{Severity: specterutils.ErrorSeverity, Message: assert.AnError.Error()}} }), }, - ctx: specter.ProcessingContext{ - Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{}), + ctx: specter.UnitProcessingContext{ + Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, }, }, then: []specter.Artifact{ @@ -552,9 +372,8 @@ func TestLintingProcessor_Process(t *testing.T) { } }), }, - ctx: 
specter.ProcessingContext{ - Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, - Logger: specter.NewDefaultLogger(specter.DefaultLoggerConfig{}), + ctx: specter.UnitProcessingContext{ + Units: []specter.Unit{specterutils.NewGenericUnit("unit", "spec_type", specter.Source{})}, }, }, then: []specter.Artifact{ @@ -589,12 +408,12 @@ func TestLintingProcessor_Process(t *testing.T) { func TestGetLintingResultsFromContext(t *testing.T) { tests := []struct { name string - given specter.ProcessingContext + given specter.UnitProcessingContext then specterutils.LinterResultSet }{ { name: "GIVEN context with linting results THEN return linting results", - given: specter.ProcessingContext{ + given: specter.UnitProcessingContext{ Artifacts: []specter.Artifact{ specterutils.LinterResultSet{{Severity: specterutils.WarningSeverity, Message: "a warning"}}, }, @@ -603,12 +422,12 @@ func TestGetLintingResultsFromContext(t *testing.T) { }, { name: "GIVEN context with not linting results THEN return empty linting results", - given: specter.ProcessingContext{}, + given: specter.UnitProcessingContext{}, then: specterutils.LinterResultSet(nil), }, { name: "GIVEN a context with wrong value for artifact name THEN return nil", - given: specter.ProcessingContext{ + given: specter.UnitProcessingContext{ Artifacts: []specter.Artifact{ testutils.NewArtifactStub(specterutils.LinterResultArtifactID), }, diff --git a/pkg/specterutils/unitversion.go b/pkg/specterutils/unitversion.go index 6734fd3..26e7b4b 100644 --- a/pkg/specterutils/unitversion.go +++ b/pkg/specterutils/unitversion.go @@ -45,7 +45,7 @@ func HasVersionMustHaveAVersionLinter(severity LinterResultSeverity) UnitLinter r = append(r, LinterResult{ Severity: severity, - Message: fmt.Sprintf("unit %q at %q should have a version", unit.Name(), unit.Source().Location), + Message: fmt.Sprintf("unit %q at %q should have a version", unit.ID(), unit.Source().Location), }) } return r diff --git a/pkg/specterutils/unitversion_test.go b/pkg/specterutils/unitversion_test.go index 5a80a03..005ac2a 100644 --- a/pkg/specterutils/unitversion_test.go +++ b/pkg/specterutils/unitversion_test.go @@ -26,29 +26,25 @@ import ( var _ specter.Unit = (*mockUnit)(nil) type mockUnit struct { - name specter.UnitName + name specter.UnitID description string source specter.Source version specterutils.UnitVersion - typeName specter.UnitType + kind specter.UnitKind } -func (m *mockUnit) Name() specter.UnitName { +func (m *mockUnit) ID() specter.UnitID { return m.name } -func (m *mockUnit) Type() specter.UnitType { - return m.typeName +func (m *mockUnit) Kind() specter.UnitKind { + return m.kind } func (m *mockUnit) Description() string { return m.description } -func (m *mockUnit) SetSource(s specter.Source) { - m.source = s -} - func (m *mockUnit) Source() specter.Source { return m.source } diff --git a/pkg/testutils/artifactproc.go b/pkg/testutils/artifactproc.go new file mode 100644 index 0000000..6feaac6 --- /dev/null +++ b/pkg/testutils/artifactproc.go @@ -0,0 +1,46 @@ +package testutils + +import ( + "github.com/morebec/specter/pkg/specter" + "github.com/stretchr/testify/mock" +) + +type ArtifactStub struct { + id specter.ArtifactID +} + +func NewArtifactStub(id specter.ArtifactID) *ArtifactStub { + return &ArtifactStub{id: id} +} + +func (m ArtifactStub) ID() specter.ArtifactID { + return m.id +} + +// MockArtifactRegistry is a mock implementation of ArtifactRegistry +type MockArtifactRegistry struct { + mock.Mock +} + +func (m 
*MockArtifactRegistry) Load() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockArtifactRegistry) Save() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockArtifactRegistry) Add(processorName string, artifactID specter.ArtifactID) { + m.Called(processorName, artifactID) +} + +func (m *MockArtifactRegistry) Remove(processorName string, artifactID specter.ArtifactID) { + m.Called(processorName, artifactID) +} + +func (m *MockArtifactRegistry) Artifacts(processorName string) []specter.ArtifactID { + args := m.Called(processorName) + return args.Get(0).([]specter.ArtifactID) +} diff --git a/pkg/testutils/errors.go b/pkg/testutils/errors.go new file mode 100644 index 0000000..b7334c8 --- /dev/null +++ b/pkg/testutils/errors.go @@ -0,0 +1,18 @@ +package testutils + +import ( + "github.com/morebec/go-errors/errors" + "github.com/stretchr/testify/require" +) + +func RequireErrorWithCode(c string) require.ErrorAssertionFunc { + return func(t require.TestingT, err error, i ...interface{}) { + require.Error(t, err) + + var sysError errors.SystemError + if !errors.As(err, &sysError) { + t.Errorf("expected a system error with code %q but got %s", c, err) + } + require.Equal(t, c, sysError.Code()) + } +} diff --git a/pkg/testutils/utils.go b/pkg/testutils/filesystem.go similarity index 55% rename from pkg/testutils/utils.go rename to pkg/testutils/filesystem.go index 9da94f3..2ee56fd 100644 --- a/pkg/testutils/utils.go +++ b/pkg/testutils/filesystem.go @@ -1,75 +1,12 @@ -// Copyright 2024 Morébec -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package testutils import ( - "github.com/morebec/go-errors/errors" - "github.com/morebec/specter/pkg/specter" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" "io/fs" "os" "strings" "sync" ) -// / ===================================================================================================================/// -func RequireErrorWithCode(c string) require.ErrorAssertionFunc { - return func(t require.TestingT, err error, i ...interface{}) { - require.Error(t, err) - - var sysError errors.SystemError - if !errors.As(err, &sysError) { - t.Errorf("expected a system error with code %q but got %s", c, err) - } - require.Equal(t, c, sysError.Code()) - } -} - -var _ specter.Unit = (*UnitStub)(nil) - -type UnitStub struct { - Name_ specter.UnitName - TypeName specter.UnitType - Src specter.Source - desc string -} - -func (us *UnitStub) Name() specter.UnitName { - return us.Name_ -} - -func (us *UnitStub) Type() specter.UnitType { - return us.TypeName -} - -func (us *UnitStub) Description() string { - return us.desc -} - -func (us *UnitStub) Source() specter.Source { - return us.Src -} - -func (us *UnitStub) SetSource(src specter.Source) { - us.Src = src -} - -// FILE SYSTEM -var _ specter.FileSystem = (*MockFileSystem)(nil) - // Mock implementations to use in tests. 
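The `RequireErrorWithCode` helper that now lives in `testutils/errors.go` (see above) pairs with the coded errors these processors return. A small hedged example of using it in a test follows; the test name and message are invented, while `LintingErrorCode` is the constant visible earlier in this diff.

```go
package example_test

import (
	"testing"

	"github.com/morebec/go-errors/errors"
	"github.com/morebec/specter/pkg/specterutils"
	"github.com/morebec/specter/pkg/testutils"
)

func TestLintingFailureCarriesCode(t *testing.T) {
	// Any error built through go-errors carries a code the assertion can check.
	err := errors.NewWithMessage(specterutils.LintingErrorCode, "linting failed")

	assertHasCode := testutils.RequireErrorWithCode(specterutils.LintingErrorCode)
	assertHasCode(t, err)
}
```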
type mockFileInfo struct { os.FileInfo @@ -249,47 +186,3 @@ func (m *MockFileSystem) ReadFile(filePath string) ([]byte, error) { return nil, os.ErrNotExist } - -// ARTIFACTS - -var _ specter.Artifact = ArtifactStub{} - -type ArtifactStub struct { - id specter.ArtifactID -} - -func NewArtifactStub(id specter.ArtifactID) *ArtifactStub { - return &ArtifactStub{id: id} -} - -func (m ArtifactStub) ID() specter.ArtifactID { - return m.id -} - -// MockArtifactRegistry is a mock implementation of ArtifactRegistry -type MockArtifactRegistry struct { - mock.Mock -} - -func (m *MockArtifactRegistry) Load() error { - args := m.Called() - return args.Error(0) -} - -func (m *MockArtifactRegistry) Save() error { - args := m.Called() - return args.Error(0) -} - -func (m *MockArtifactRegistry) Add(processorName string, artifactID specter.ArtifactID) { - m.Called(processorName, artifactID) -} - -func (m *MockArtifactRegistry) Remove(processorName string, artifactID specter.ArtifactID) { - m.Called(processorName, artifactID) -} - -func (m *MockArtifactRegistry) Artifacts(processorName string) []specter.ArtifactID { - args := m.Called(processorName) - return args.Get(0).([]specter.ArtifactID) -} diff --git a/pkg/testutils/unitproc.go b/pkg/testutils/unitproc.go new file mode 100644 index 0000000..292d3b7 --- /dev/null +++ b/pkg/testutils/unitproc.go @@ -0,0 +1,23 @@ +// Copyright 2024 Morébec +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutils + +import ( + "github.com/morebec/specter/pkg/specter" +) + +func NewUnitStub(id specter.UnitID, kind specter.UnitKind, source specter.Source) specter.Unit { + return specter.UnitOf[any](nil, id, kind, source) +}
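To close out the renamed linting API, here is a hedged sketch that feeds `NewUnitStub` units to the `LintingProcessor` with the renamed rules. The unit IDs and source locations are illustrative assumptions; leaving `Logger` unset relies on the discard-logger default added in this diff.

```go
package example

import (
	"fmt"

	"github.com/morebec/specter/pkg/specter"
	"github.com/morebec/specter/pkg/specterutils"
	"github.com/morebec/specter/pkg/testutils"
)

func lintExample() {
	processor := specterutils.NewLintingProcessor(
		specterutils.UnitsMustHaveIDs(specterutils.ErrorSeverity),
		specterutils.UnitsIDsMustBeUnique(specterutils.ErrorSeverity),
	)

	// Two stub units sharing the same ID should trip the uniqueness rule.
	ctx := specter.UnitProcessingContext{
		Units: []specter.Unit{
			testutils.NewUnitStub("user_service", "service", specter.Source{Location: "a.hcl"}),
			testutils.NewUnitStub("user_service", "service", specter.Source{Location: "b.hcl"}),
		},
	}

	artifacts, err := processor.Process(ctx)
	if err != nil {
		// Error-severity results are surfaced through the returned error while
		// the full LinterResultSet is still returned as an artifact.
		fmt.Println("linting failed:", err)
	}
	_ = artifacts
}
```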