diff --git a/cmd/core-controller/main.go b/cmd/core-controller/main.go deleted file mode 100644 index ae1fc932..00000000 --- a/cmd/core-controller/main.go +++ /dev/null @@ -1,26 +0,0 @@ -package main - -import ( - "os" - - injection "knative.dev/pkg/injection" - "knative.dev/pkg/injection/sharedmain" - "knative.dev/pkg/signals" - - "github.com/zeiss/typhoon/pkg/reconciler/redisbroker" - "github.com/zeiss/typhoon/pkg/reconciler/trigger" -) - -func main() { - ctx := signals.NewContext() - - ns := os.Getenv("WORKING_NAMESPACE") - if len(ns) != 0 { - ctx = injection.WithNamespaceScope(ctx, ns) - } - - sharedmain.MainWithContext(ctx, "core-controller", - redisbroker.NewController, - trigger.NewController, - ) -} diff --git a/cmd/redis-broker/main.go b/cmd/redis-broker/main.go deleted file mode 100644 index 68bd604e..00000000 --- a/cmd/redis-broker/main.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "context" - "log" - "os" - - "github.com/zeiss/typhoon/pkg/brokers/backend/impl/redis" - "github.com/zeiss/typhoon/pkg/brokers/broker" - - "github.com/katallaxie/pkg/logger" - "github.com/katallaxie/pkg/server" - "github.com/spf13/cobra" -) - -// Config ... -type Config struct { - Flags *Flags -} - -// Flags ... -type Flags struct { - Addr string -} - -var cfg = &Config{ - Flags: &Flags{}, -} - -var rootCmd = &cobra.Command{ - RunE: func(cmd *cobra.Command, args []string) error { - return run(cmd.Context()) - }, -} - -func init() { - // rootCmd.PersistentFlags().StringVar(&cfg.Flags.Addr, "addr", ":3000", "addr") - // rootCmd.PersistentFlags().StringVar(&cfg.Flags.DB.Database, "db-database", cfg.Flags.DB.Database, "Database name") - // rootCmd.PersistentFlags().StringVar(&cfg.Flags.DB.Username, "db-username", cfg.Flags.DB.Username, "Database user") - // rootCmd.PersistentFlags().StringVar(&cfg.Flags.DB.Password, "db-password", cfg.Flags.DB.Password, "Database password") - // rootCmd.PersistentFlags().IntVar(&cfg.Flags.DB.Port, "db-port", cfg.Flags.DB.Port, "Database port") - - rootCmd.SilenceUsage = true -} - -type srv struct{} - -func (b *srv) Start(ctx context.Context, _ server.ReadyFunc, _ server.RunFunc) func() error { - return func() error { - l, err := logger.NewLogSink() - if err != nil { - return err - } - - b := redis.New(nil, l.Sugar()) - - s, err := broker.NewInstance(nil, b) - if err != nil { - return err - } - - return s.Start(ctx) - } -} - -func run(ctx context.Context) error { - log.SetFlags(0) - log.SetOutput(os.Stderr) - - logger.RedirectStdLog(logger.LogSink) - - broker := &srv{} - - srv, _ := server.WithContext(ctx) - srv.Listen(broker, false) - - err := srv.Wait() - if err != nil { - return err - } - - return nil -} - -func main() { - if err := rootCmd.Execute(); err != nil { - panic(err) - } -} diff --git a/hack/inc.codegen.mk b/hack/inc.codegen.mk index 8ad07d72..bfe70092 100644 --- a/hack/inc.codegen.mk +++ b/hack/inc.codegen.mk @@ -3,18 +3,17 @@ # see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api_changes.md#generate-code # Name of the Go package for this repository -PKG := github.com/zeiss/typhoon +PKG := github.com/zeiss/typhoon # List of API groups to generate code for # e.g. "sources/v1alpha1 sources/v1alpha2" -API_GROUPS := sources/v1alpha1 targets/v1alpha1 flow/v1alpha1 extensions/v1alpha1 routing/v1alpha1 eventing/v1alpha1 +API_GROUPS := sources/v1alpha1 targets/v1alpha1 flow/v1alpha1 extensions/v1alpha1 routing/v1alpha1 # generates e.g. 
"PKG/apis/sources/v1alpha1 PKG/apis/sources/v1alpha2" api-import-paths := $(foreach group,$(API_GROUPS),$(PKG)/pkg/apis/$(group)) -generators := deepcopy client lister informer injection +generators := deepcopy client lister informer injection .PHONY: codegen $(generators) - codegen: $(generators) # http://blog.jgc.org/2007/06/escaping-comma-and-space-in-gnu-make.html @@ -79,10 +78,8 @@ injection: --listers-package $(PKG)/pkg/client/generated/listers \ --external-versions-informers-package $(PKG)/pkg/client/generated/informers/externalversions -# In environments where the project is located outside the GOPATH, -# codegen creates a nested $(PKG) directory right in the project's root. -# Until the codegen configuration gets fixed, -# this target can be used to move generated files where they belong. +# Cleanup codegen +.PHONY: codegen-cleanup codegen-cleanup: @if [ -d "./$(PKG)" ]; then \ cp -a ./$(PKG)/pkg/client/generated/ pkg/client/generated/ ;\ diff --git a/pkg/apis/eventing/eventing.go b/pkg/apis/eventing/eventing.go deleted file mode 100644 index 153fabd5..00000000 --- a/pkg/apis/eventing/eventing.go +++ /dev/null @@ -1,42 +0,0 @@ -package eventing - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GroupName = "eventing.typhoon.zeiss.com" -) - -// BrokersResource represents a Redis broker resource. -var RedisBrokersResource = schema.GroupResource{ - Group: GroupName, - Resource: "redisbrokers", -} - -// Given an object accessor, return a list of owner references that are brokers. -func GetOwnerBrokers(object metav1.ObjectMetaAccessor) []metav1.OwnerReference { - ors := []metav1.OwnerReference{} - - for _, or := range object.GetObjectMeta().GetOwnerReferences() { - gv, err := schema.ParseGroupVersion(or.APIVersion) - if err != nil { - continue - } - - if gv.Group == GroupName && IsBrokerKind(or.Kind) { - ors = append(ors, or) - } - } - - return ors -} - -func IsBrokerKind(kind string) bool { - if kind == "RedisBroker" || kind == "MemoryBroker" { - return true - } - - return false -} diff --git a/pkg/apis/eventing/v1alpha1/doc.go b/pkg/apis/eventing/v1alpha1/doc.go deleted file mode 100644 index b2668445..00000000 --- a/pkg/apis/eventing/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package v1alpha1 is the v1alpha1 version of the API. -// +k8s:deepcopy-gen=package -// +groupName=eventing.typhoon.zeiss.com -package v1alpha1 diff --git a/pkg/apis/eventing/v1alpha1/interfaces.go b/pkg/apis/eventing/v1alpha1/interfaces.go deleted file mode 100644 index 4eeefb81..00000000 --- a/pkg/apis/eventing/v1alpha1/interfaces.go +++ /dev/null @@ -1,57 +0,0 @@ -package v1alpha1 - -import ( - "context" - - appsv1 "k8s.io/api/apps/v1" - "knative.dev/pkg/kmeta" -) - -type Broker struct { - Port *int `json:"port,omitempty"` - - Observability *Observability `json:"observability,omitempty"` -} - -type Observability struct { - ValueFromConfigMap string `json:"valueFromConfigMap"` -} - -type ReconcilableBroker interface { - kmeta.OwnerRefable - - GetReconcilableBrokerStatus() ReconcilableBrokerStatus - GetOwnedObjectsSuffix() string - GetReconcilableBrokerSpec() *Broker -} - -type ReconcilableBrokerStatus interface { - // Secret as config status management. - MarkConfigSecretFailed(reason, messageFormat string, messageA ...interface{}) - MarkConfigSecretReady() - - // Status Config management. - MarkStatusConfigFailed(reason, messageFormat string, messageA ...interface{}) - MarkStatusConfigReady() - - // ServiceAccount status management. 
- MarkBrokerServiceAccountFailed(reason, messageFormat string, messageA ...interface{}) - MarkBrokerServiceAccountReady() - - // RoleBinding status management. - MarkBrokerRoleBindingFailed(reason, messageFormat string, messageA ...interface{}) - MarkBrokerRoleBindingReady() - - // Broker Deployment status management. - MarkBrokerDeploymentFailed(reason, messageFormat string, messageA ...interface{}) - PropagateBrokerDeploymentAvailability(ctx context.Context, ds *appsv1.DeploymentStatus) - - // Broker Service status management - MarkBrokerServiceFailed(reason, messageFormat string, messageA ...interface{}) - MarkBrokerServiceReady() - - // Broker Endpoints status management. - MarkBrokerEndpointsTrue() - MarkBrokerEndpointsUnknown(reason, messageFormat string, messageA ...interface{}) - MarkBrokerEndpointsFailed(reason, messageFormat string, messageA ...interface{}) -} diff --git a/pkg/apis/eventing/v1alpha1/redisbroker_lifecycle.go b/pkg/apis/eventing/v1alpha1/redisbroker_lifecycle.go deleted file mode 100644 index 742470ec..00000000 --- a/pkg/apis/eventing/v1alpha1/redisbroker_lifecycle.go +++ /dev/null @@ -1,286 +0,0 @@ -package v1alpha1 - -import ( - "context" - "sync" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "knative.dev/pkg/apis" - duckv1 "knative.dev/pkg/apis/duck/v1" -) - -// RedisBrokerRedis refers to the Redis instance backing the broker. - -const ( - RedisBrokerConditionReady = apis.ConditionReady - RedisBrokerRedisDeployment apis.ConditionType = "RedisDeploymentReady" - RedisBrokerRedisService apis.ConditionType = "RedisServiceReady" - RedisBrokerRedisServiceEndpointsConditionReady apis.ConditionType = "RedisEndpointsReady" - RedisBrokerBrokerDeployment apis.ConditionType = "BrokerDeploymentReady" - RedisBrokerBrokerServiceAccount apis.ConditionType = "BrokerServiceAccountReady" - RedisBrokerBrokerRoleBinding apis.ConditionType = "RedisBrokerBrokerRoleBinding" - RedisBrokerBrokerService apis.ConditionType = "BrokerServiceReady" - RedisBrokerBrokerServiceEndpointsConditionReady apis.ConditionType = "BrokerEndpointsReady" - RedisBrokerConfigSecret apis.ConditionType = "BrokerConfigSecretReady" - RedisBrokerConditionAddressable apis.ConditionType = "Addressable" - RedisBrokerStatusConfig apis.ConditionType = "BrokerStatusConfigReady" - - RedisBrokerReasonUserProvided string = "ReasonUserProvidedRedis" -) - -var redisBrokerCondSet = apis.NewLivingConditionSet( - RedisBrokerRedisDeployment, - RedisBrokerRedisService, - RedisBrokerRedisServiceEndpointsConditionReady, - RedisBrokerBrokerServiceAccount, - RedisBrokerBrokerRoleBinding, - RedisBrokerBrokerDeployment, - RedisBrokerBrokerService, - RedisBrokerBrokerServiceEndpointsConditionReady, - RedisBrokerConfigSecret, - RedisBrokerConditionAddressable, - RedisBrokerStatusConfig, -) -var redisBrokerCondSetLock = sync.RWMutex{} - -// GetGroupVersionKind returns GroupVersionKind for Brokers -func (t *RedisBroker) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind("RedisBroker") -} - -// GetStatus retrieves the status of the Broker. Implements the KRShaped interface. -func (t *RedisBroker) GetStatus() *duckv1.Status { - return &t.Status.Status -} - -// GetReconcilableBrokerSpec returns the all brokers common Broker spec. -func (t *RedisBroker) GetReconcilableBrokerSpec() *Broker { - return &t.Spec.Broker -} - -// GetReconcilableBrokerStatus returns a status interface that allows generic reconciler -// to manage it. 
-func (t *RedisBroker) GetReconcilableBrokerStatus() ReconcilableBrokerStatus { - return &t.Status -} - -// GetOwnedObjectsSuffix returns a string to be appended for created/owned objects. -func (t *RedisBroker) GetOwnedObjectsSuffix() string { - return "rb" -} - -// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. -func (b *RedisBroker) GetConditionSet() apis.ConditionSet { - redisBrokerCondSetLock.RLock() - defer redisBrokerCondSetLock.RUnlock() - - return redisBrokerCondSet -} - -// IsExternalRedis returns if the Redis instance is user provided. -func (b *RedisBroker) IsUserProvidedRedis() bool { - if b.Spec.Redis != nil && b.Spec.Redis.Connection != nil { - return true - } - return false -} - -// GetConditionSet retrieves the condition set for this resource. -func (bs *RedisBrokerStatus) GetConditionSet() apis.ConditionSet { - redisBrokerCondSetLock.RLock() - defer redisBrokerCondSetLock.RUnlock() - - return redisBrokerCondSet -} - -// GetTopLevelCondition returns the top level Condition. -func (bs *RedisBrokerStatus) GetTopLevelCondition() *apis.Condition { - return bs.GetConditionSet().Manage(bs).GetTopLevelCondition() -} - -// SetAddress makes this Broker addressable by setting the URI. It also -// sets the BrokerConditionAddressable to true. -func (bs *RedisBrokerStatus) SetAddress(url *apis.URL) { - bs.Address.URL = url - if url != nil { - bs.GetConditionSet().Manage(bs).MarkTrue(RedisBrokerConditionAddressable) - } else { - bs.GetConditionSet().Manage(bs).MarkFalse(RedisBrokerConditionAddressable, "nil URL", "URL is nil") - } -} - -// GetCondition returns the condition currently associated with the given type, or nil. -func (bs *RedisBrokerStatus) GetCondition(t apis.ConditionType) *apis.Condition { - return bs.GetConditionSet().Manage(bs).GetCondition(t) -} - -// IsReady returns true if the resource is ready overall and the latest spec has been observed. -func (b *RedisBroker) IsReady() bool { - bs := b.Status - return bs.ObservedGeneration == b.Generation && - b.GetConditionSet().Manage(&bs).IsHappy() -} - -// InitializeConditions sets relevant unset conditions to Unknown state. -func (bs *RedisBrokerStatus) InitializeConditions() { - bs.GetConditionSet().Manage(bs).InitializeConditions() -} - -func (bs *RedisBrokerStatus) MarkConfigSecretFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerConfigSecret, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkConfigSecretUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerConfigSecret, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkConfigSecretReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerConfigSecret) -} - -func (bs *RedisBrokerStatus) MarkStatusConfigFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerStatusConfig, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkStatusConfigUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerStatusConfig, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkStatusConfigReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerStatusConfig) -} - -// Manage Broker's service account and rolebinding. 
- -func (bs *RedisBrokerStatus) MarkBrokerServiceAccountFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerBrokerServiceAccount, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerServiceAccountUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerBrokerServiceAccount, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerServiceAccountReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerBrokerServiceAccount) -} - -func (bs *RedisBrokerStatus) MarkBrokerRoleBindingFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerBrokerRoleBinding, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerRoleBindingUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerBrokerRoleBinding, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerRoleBindingReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerBrokerRoleBinding) -} - -// Manage Redis server state for both -// Service and Deployment - -func (bs *RedisBrokerStatus) MarkRedisDeploymentFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerRedisDeployment, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkRedisDeploymentUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerRedisDeployment, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) PropagateRedisDeploymentAvailability(ctx context.Context, ds *appsv1.DeploymentStatus) { - for _, cond := range ds.Conditions { - if cond.Type == appsv1.DeploymentAvailable { - switch cond.Status { - case corev1.ConditionTrue: - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerRedisDeployment) - case corev1.ConditionFalse: - bs.MarkRedisDeploymentFailed("RedisDeploymentFalse", "The status of Redis Deployment is False: %s : %s", cond.Reason, cond.Message) - default: - // expected corev1.ConditionUnknown - bs.MarkRedisDeploymentUnknown("RedisDeploymentUnknown", "The status of Redis Deployment is Unknown: %s : %s", cond.Reason, cond.Message) - } - } - } -} - -func (bs *RedisBrokerStatus) MarkRedisServiceFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerRedisService, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkRedisServiceUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerRedisService, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkRedisServiceReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerRedisService) -} - -func (bs *RedisBrokerStatus) MarkRedisEndpointsFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerRedisServiceEndpointsConditionReady, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkRedisEndpointsUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerRedisServiceEndpointsConditionReady, reason, messageFormat, messageA...) 
-} - -func (bs *RedisBrokerStatus) MarkRedisEndpointsTrue() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerRedisServiceEndpointsConditionReady) -} - -// Manage Redis broker state for -// Deployment, Service and Endpoint -func (bs *RedisBrokerStatus) MarkBrokerDeploymentFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerBrokerDeployment, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerDeploymentUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerBrokerDeployment, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) PropagateBrokerDeploymentAvailability(ctx context.Context, ds *appsv1.DeploymentStatus) { - for _, cond := range ds.Conditions { - if cond.Type == appsv1.DeploymentAvailable { - switch cond.Status { - case corev1.ConditionTrue: - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerBrokerDeployment) - case corev1.ConditionFalse: - bs.MarkBrokerDeploymentFailed("BrokerDeploymentFalse", "The status of Broker Deployment is False: %s : %s", cond.Reason, cond.Message) - default: - // expected corev1.ConditionUnknown - bs.MarkBrokerDeploymentUnknown("BrokerDeploymentUnknown", "The status of Broker Deployment is Unknown: %s : %s", cond.Reason, cond.Message) - } - } - } -} - -func (bs *RedisBrokerStatus) MarkBrokerServiceFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerBrokerService, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerServiceUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerBrokerService, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerServiceReady() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerBrokerService) -} - -func (bs *RedisBrokerStatus) MarkBrokerEndpointsFailed(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkFalse(RedisBrokerBrokerServiceEndpointsConditionReady, reason, messageFormat, messageA...) -} - -func (bs *RedisBrokerStatus) MarkBrokerEndpointsUnknown(reason, messageFormat string, messageA ...interface{}) { - redisBrokerCondSet.Manage(bs).MarkUnknown(RedisBrokerBrokerServiceEndpointsConditionReady, reason, messageFormat, messageA...) 
-} - -func (bs *RedisBrokerStatus) MarkBrokerEndpointsTrue() { - redisBrokerCondSet.Manage(bs).MarkTrue(RedisBrokerBrokerServiceEndpointsConditionReady) -} - -func (bs *RedisBrokerStatus) MarkRedisUserProvided() { - redisBrokerCondSet.Manage(bs).MarkTrueWithReason(RedisBrokerRedisDeployment, RedisBrokerReasonUserProvided, "Redis instance is externally provided") - redisBrokerCondSet.Manage(bs).MarkTrueWithReason(RedisBrokerRedisService, RedisBrokerReasonUserProvided, "Redis instance is externally provided") - redisBrokerCondSet.Manage(bs).MarkTrueWithReason(RedisBrokerRedisServiceEndpointsConditionReady, RedisBrokerReasonUserProvided, "Redis instance is externally provided") -} diff --git a/pkg/apis/eventing/v1alpha1/redisbroker_types.go b/pkg/apis/eventing/v1alpha1/redisbroker_types.go deleted file mode 100644 index 15d05f54..00000000 --- a/pkg/apis/eventing/v1alpha1/redisbroker_types.go +++ /dev/null @@ -1,119 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - duckv1 "knative.dev/pkg/apis/duck/v1" -) - -// +genclient -// +genreconciler -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RedisBroker is a Redis based broker implementation that collects a pool of -// events that are consumable using Triggers. Brokers provide a well-known endpoint -// for event delivery that senders can use with minimal knowledge of the event -// routing strategy. Subscribers use Triggers to request delivery of events from a -// broker's pool to a specific URL or Addressable endpoint. -type RedisBroker struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired state of the broker. - Spec RedisBrokerSpec `json:"spec,omitempty"` - - // Status represents the current state of the broker. This data may be out of - // date. - // +optional - Status RedisBrokerStatus `json:"status,omitempty"` -} - -var ( - // Make sure this is a kubernetes object. - _ runtime.Object = (*RedisBroker)(nil) - // Check that we can reconcile this object as a Broker. - _ ReconcilableBroker = (*RedisBroker)(nil) - // Check that the type conforms to the duck Knative Resource shape. - _ duckv1.KRShaped = (*RedisBroker)(nil) -) - -type RedisConnection struct { - // Redis URL for standalone instances - URL *string `json:"url,omitempty"` - - // Redis URLs for cluster instances - ClusterURLs []string `json:"clusterURLs,omitempty"` - - // Redis username. - Username *SecretValueFromSource `json:"username,omitempty"` - - // Redis password. - Password *SecretValueFromSource `json:"password,omitempty"` - - // CA Certificate used to connect to Redis. - TLSCACertificate *SecretValueFromSource `json:"tlsCACertificate,omitempty"` - - // Certificate used to connect to authenticate to Redis. - TLSCertificate *SecretValueFromSource `json:"tlsCertificate,omitempty"` - - // Certificate Key used to connect to authenticate to Redis. - TLSKey *SecretValueFromSource `json:"tlsKey,omitempty"` - - // Use TLS enctrypted connection. - TLSEnabled *bool `json:"tlsEnabled,omitempty"` - - // Skip TLS certificate verification. - TLSSkipVerify *bool `json:"tlsSkipVerify,omitempty"` -} - -type Redis struct { - // Redis connection data. - Connection *RedisConnection `json:"connection,omitempty"` - - // Stream name used by the broker. - Stream *string `json:"stream,omitempty"` - - // Maximum number of items the stream can host. 
- StreamMaxLen *int `json:"streamMaxLen,omitempty"` - - // Whether the Redis ID for the event is added as a CloudEvents attribute. - EnableTrackingID *bool `json:"enableTrackingID,omitempty"` -} - -// SecretValueFromSource represents the source of a secret value -type SecretValueFromSource struct { - // The Secret key to select from. - SecretKeyRef corev1.SecretKeySelector `json:"secretKeyRef,omitempty"` -} - -type RedisBrokerSpec struct { - Redis *Redis `json:"redis,omitempty"` - - Broker Broker `json:"broker,omitempty"` -} - -// RedisBrokerStatus represents the current state of a Redis broker. -type RedisBrokerStatus struct { - // inherits duck/v1 Status, which currently provides: - // * ObservedGeneration - the 'Generation' of the Broker that was last processed by the controller. - // * Conditions - the latest available observations of a resource's current state. - duckv1.Status `json:",inline"` - - // Broker is Addressable. It exposes the endpoint as an URI to get events - // delivered into the Broker mesh. - // +optional - Address duckv1.Addressable `json:"address,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RedisBrokerList is a collection of Brokers. -type RedisBrokerList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []RedisBroker `json:"items"` -} diff --git a/pkg/apis/eventing/v1alpha1/register.go b/pkg/apis/eventing/v1alpha1/register.go deleted file mode 100644 index ec793954..00000000 --- a/pkg/apis/eventing/v1alpha1/register.go +++ /dev/null @@ -1,40 +0,0 @@ -package v1alpha1 - -import ( - "github.com/zeiss/typhoon/pkg/apis/eventing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1alpha1"} - // SchemeBuilder creates a Scheme builder that is used to register types for this custom API. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme registers the types stored in SchemeBuilder. - AddToScheme = SchemeBuilder.AddToScheme -) - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &RedisBroker{}, - &RedisBrokerList{}, - &Trigger{}, - &TriggerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go b/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go deleted file mode 100644 index c6e39358..00000000 --- a/pkg/apis/eventing/v1alpha1/trigger_lifecycle.go +++ /dev/null @@ -1,173 +0,0 @@ -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "knative.dev/pkg/apis" - - duckv1 "knative.dev/pkg/apis/duck/v1" - "knative.dev/pkg/kmeta" -) - -var triggerCondSet = apis.NewLivingConditionSet(TriggerConditionBroker, TriggerConditionTargetResolved, TriggerConditionDeadLetterSinkResolved, TriggerConditionStatusConfigMap) - -const ( - // TriggerConditionReady has status True when all subconditions below have been set to True. - TriggerConditionReady = apis.ConditionReady - - TriggerConditionBroker apis.ConditionType = "BrokerReady" - - TriggerConditionStatusConfigMap apis.ConditionType = "StatusConfigMapReady" - - TriggerConditionTargetResolved apis.ConditionType = "TargetResolved" - - TriggerConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved" - - // TriggerAnyFilter Constant to represent that we should allow anything. - TriggerAnyFilter = "" -) - -// GetStatus retrieves the status of the Trigger. Implements the KRShaped interface. -func (t *Trigger) GetStatus() *duckv1.Status { - return &t.Status.Status -} - -// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. -func (*Trigger) GetConditionSet() apis.ConditionSet { - return triggerCondSet -} - -// GetGroupVersionKind returns GroupVersionKind for Triggers -func (t *Trigger) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind("Trigger") -} - -// GetUntypedSpec returns the spec of the Trigger. -func (t *Trigger) GetUntypedSpec() interface{} { - return t.Spec -} - -// GetCondition returns the condition currently associated with the given type, or nil. -func (ts *TriggerStatus) GetCondition(t apis.ConditionType) *apis.Condition { - return triggerCondSet.Manage(ts).GetCondition(t) -} - -// GetTopLevelCondition returns the top level Condition. -func (ts *TriggerStatus) GetTopLevelCondition() *apis.Condition { - return triggerCondSet.Manage(ts).GetTopLevelCondition() -} - -// IsReady returns true if the resource is ready overall. -func (ts *TriggerStatus) IsReady() bool { - return triggerCondSet.Manage(ts).IsHappy() -} - -// InitializeConditions sets relevant unset conditions to Unknown state. 
-func (ts *TriggerStatus) InitializeConditions() { - triggerCondSet.Manage(ts).InitializeConditions() -} - -func (ts *TriggerStatus) PropagateBrokerCondition(bc *apis.Condition) { - if bc == nil { - ts.MarkBrokerNotConfigured() - return - } - - switch { - case bc.Status == corev1.ConditionUnknown: - ts.MarkBrokerUnknown(bc.Reason, bc.Message) - case bc.Status == corev1.ConditionTrue: - triggerCondSet.Manage(ts).MarkTrue(TriggerConditionBroker) - case bc.Status == corev1.ConditionFalse: - ts.MarkBrokerFailed(bc.Reason, bc.Message) - default: - ts.MarkBrokerUnknown("BrokerUnknown", "The status of Broker is invalid: %v", bc.Status) - } -} - -func (ts *TriggerStatus) MarkBrokerFailed(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkFalse(TriggerConditionBroker, reason, messageFormat, messageA...) -} - -func (ts *TriggerStatus) MarkBrokerUnknown(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionBroker, reason, messageFormat, messageA...) -} - -func (ts *TriggerStatus) MarkBrokerNotConfigured() { - triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionBroker, - "BrokerNotConfigured", "Broker has not yet been reconciled.") -} - -func (ts *TriggerStatus) MarkTargetResolvedSucceeded() { - triggerCondSet.Manage(ts).MarkTrue(TriggerConditionTargetResolved) -} - -func (ts *TriggerStatus) MarkTargetResolvedFailed(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkFalse(TriggerConditionTargetResolved, reason, messageFormat, messageA...) -} - -func (ts *TriggerStatus) MarkTargetResolvedUnknown(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionTargetResolved, reason, messageFormat, messageA...) -} - -func (ts *TriggerStatus) MarkDeadLetterSinkResolvedSucceeded() { - triggerCondSet.Manage(ts).MarkTrue(TriggerConditionDeadLetterSinkResolved) -} - -func (ts *TriggerStatus) MarkDeadLetterSinkNotConfigured() { - triggerCondSet.Manage(ts).MarkTrueWithReason(TriggerConditionDeadLetterSinkResolved, "DeadLetterSinkNotConfigured", "No dead letter sink is configured.") -} - -func (ts *TriggerStatus) MarkDeadLetterSinkResolvedFailed(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkFalse(TriggerConditionDeadLetterSinkResolved, reason, messageFormat, messageA...) -} - -func (t *Trigger) OwnerRefableMatchesBroker(broker kmeta.OwnerRefable) bool { - gvk := broker.GetGroupVersionKind() - - // Require same namespace for Trigger and Broker. - if t.Spec.Broker.Namespace != "" && - t.Spec.Broker.Namespace != broker.GetObjectMeta().GetNamespace() { - return false - } - - // If APIVersion is informed it should match the Broker's. 
- if t.Spec.Broker.APIVersion != "" { - if t.Spec.Broker.APIVersion != gvk.GroupVersion().String() { - return false - } - } else if t.Spec.Broker.Group != gvk.Group { - return false - } - - return t.Spec.Broker.Name == broker.GetObjectMeta().GetName() && - t.Spec.Broker.Kind == gvk.Kind -} - -func (t *Trigger) OwnerReferenceMatchesBroker(broker metav1.OwnerReference) bool { - if t.Spec.Broker.APIVersion != "" && t.Spec.Broker.APIVersion != broker.APIVersion { - return false - } - - if t.Spec.Broker.Group != "" { - gv, err := schema.ParseGroupVersion(broker.APIVersion) - if err != nil { - return false - } - if t.Spec.Broker.Group != gv.Group { - return false - } - } - - return t.Spec.Broker.Name == broker.Name && - t.Spec.Broker.Kind == broker.Kind -} - -func (ts *TriggerStatus) MarkStatusConfigMapFailed(reason, messageFormat string, messageA ...interface{}) { - triggerCondSet.Manage(ts).MarkFalse(TriggerConditionStatusConfigMap, reason, messageFormat, messageA...) -} - -func (ts *TriggerStatus) MarkStatusConfigMapSucceeded(reason, message string) { - triggerCondSet.Manage(ts).MarkTrueWithReason(TriggerConditionStatusConfigMap, reason, message) -} diff --git a/pkg/apis/eventing/v1alpha1/trigger_types.go b/pkg/apis/eventing/v1alpha1/trigger_types.go deleted file mode 100644 index a100ee50..00000000 --- a/pkg/apis/eventing/v1alpha1/trigger_types.go +++ /dev/null @@ -1,125 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" - "knative.dev/pkg/apis" - duckv1 "knative.dev/pkg/apis/duck/v1" - "knative.dev/pkg/kmeta" - - "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -// +genclient -// +genreconciler -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Trigger represents a request to have events delivered to a target from a -// Broker's event pool. -type Trigger struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired state of the Trigger. - Spec TriggerSpecBounded `json:"spec,omitempty"` - - // Status represents the current state of the Trigger. This data may be out of - // date. - // +optional - Status TriggerStatus `json:"status,omitempty"` -} - -var ( - // Make sure this is a kubernetes object. - _ runtime.Object = (*Trigger)(nil) - // Check that we can create OwnerReferences with this object. - _ kmeta.OwnerRefable = (*Trigger)(nil) - // Check that the type conforms to the duck Knative Resource shape. - _ duckv1.KRShaped = (*Trigger)(nil) -) - -// TriggerSpec defines the desired state of Trigger -type TriggerSpec struct { - // Broker is the broker that this trigger receives events from. - Broker duckv1.KReference `json:"broker,omitempty"` - - // Filters is an experimental field that conforms to the CNCF CloudEvents Subscriptions - // API. It's an array of filter expressions that evaluate to true or false. - // If any filter expression in the array evaluates to false, the event MUST - // NOT be sent to the target. If all the filter expressions in the array - // evaluate to true, the event MUST be attempted to be delivered. Absence of - // a filter or empty array implies a value of true. In the event of users - // specifying both Filter and Filters, then the latter will override the former. 
- // This will allow users to try out the effect of the new Filters field - // without compromising the existing attribute-based Filter and try it out on existing - // Trigger objects. - // - // +optional - Filters []broker.Filter `json:"filters,omitempty"` - - // Target is the addressable that receives events from the Broker that pass - // the Filter. It is required. - Target duckv1.Destination `json:"target,omitempty"` - - // Delivery contains the delivery spec for this specific trigger. - // +optional - Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"` -} - -type TriggerSpecBounded struct { - TriggerSpec `json:",inline"` - - // Bounds for the receiving events - Bounds *TriggerBounds `json:"bounds,omitempty"` -} - -// TriggerBounds set the policy for the event offsets we are interested in receiving. -type TriggerBounds struct { - // TriggerBoundsByID set offsets policy by backing broker ID. - ById *TriggerBoundsByID `json:"byId,omitempty"` - // TriggerBoundsByID set offsets policy by date. - ByDate *TriggerBoundsByDate `json:"byDate,omitempty"` -} - -type TriggerBoundsByID struct { - // Starting offset. - Start *string `json:"start,omitempty"` - // Ending offset. - End *string `json:"end,omitempty"` -} - -type TriggerBoundsByDate struct { - // Starting date. - Start *string `json:"start,omitempty"` - // Ending date. - End *string `json:"end,omitempty"` -} - -// TriggerStatus represents the current state of a Trigger. -type TriggerStatus struct { - // inherits duck/v1 Status, which currently provides: - // * ObservedGeneration - the 'Generation' of the Trigger that was last processed by the controller. - // * Conditions - the latest available observations of a resource's current state. - duckv1.Status `json:",inline"` - - // TargetURI is the resolved URI of the receiver for this Trigger. - // +optional - TargetURI *apis.URL `json:"targetUri,omitempty"` - - // DeliveryStatus contains a resolved URL to the dead letter sink address, and any other - // resolved delivery options. - eventingduckv1.DeliveryStatus `json:",inline"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TriggerList is a collection of Triggers. -type TriggerList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Trigger `json:"items"` -} diff --git a/pkg/apis/eventing/v1alpha1/trigger_validation.go b/pkg/apis/eventing/v1alpha1/trigger_validation.go deleted file mode 100644 index 21a1479a..00000000 --- a/pkg/apis/eventing/v1alpha1/trigger_validation.go +++ /dev/null @@ -1,28 +0,0 @@ -package v1alpha1 - -import ( - "context" - - "github.com/zeiss/typhoon/pkg/brokers/config/broker" - - "knative.dev/pkg/apis" -) - -// Validate the Trigger. -func (t *Trigger) Validate(ctx context.Context) *apis.FieldError { - errs := t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec") - return errs -} - -// Validate the TriggerSpec. 
-func (ts *TriggerSpec) Validate(ctx context.Context) (errs *apis.FieldError) { - errs = ts.Broker.Validate(ctx).ViaField("broker") - - return errs.Also( - broker.ValidateSubscriptionAPIFiltersList(ctx, ts.Filters).ViaField("filters"), - ).Also( - ts.Target.Validate(ctx).ViaField("target"), - ).Also( - ts.Delivery.Validate(ctx).ViaField("delivery"), - ) -} diff --git a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 8fb0ac8a..00000000 --- a/pkg/apis/eventing/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,484 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - broker "github.com/zeiss/typhoon/pkg/brokers/config/broker" - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "knative.dev/eventing/pkg/apis/duck/v1" - apis "knative.dev/pkg/apis" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Broker) DeepCopyInto(out *Broker) { - *out = *in - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int) - **out = **in - } - if in.Observability != nil { - in, out := &in.Observability, &out.Observability - *out = new(Observability) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Broker. -func (in *Broker) DeepCopy() *Broker { - if in == nil { - return nil - } - out := new(Broker) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Observability) DeepCopyInto(out *Observability) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Observability. -func (in *Observability) DeepCopy() *Observability { - if in == nil { - return nil - } - out := new(Observability) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Redis) DeepCopyInto(out *Redis) { - *out = *in - if in.Connection != nil { - in, out := &in.Connection, &out.Connection - *out = new(RedisConnection) - (*in).DeepCopyInto(*out) - } - if in.Stream != nil { - in, out := &in.Stream, &out.Stream - *out = new(string) - **out = **in - } - if in.StreamMaxLen != nil { - in, out := &in.StreamMaxLen, &out.StreamMaxLen - *out = new(int) - **out = **in - } - if in.EnableTrackingID != nil { - in, out := &in.EnableTrackingID, &out.EnableTrackingID - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Redis. -func (in *Redis) DeepCopy() *Redis { - if in == nil { - return nil - } - out := new(Redis) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisBroker) DeepCopyInto(out *RedisBroker) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisBroker. 
-func (in *RedisBroker) DeepCopy() *RedisBroker { - if in == nil { - return nil - } - out := new(RedisBroker) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RedisBroker) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisBrokerList) DeepCopyInto(out *RedisBrokerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RedisBroker, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisBrokerList. -func (in *RedisBrokerList) DeepCopy() *RedisBrokerList { - if in == nil { - return nil - } - out := new(RedisBrokerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RedisBrokerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisBrokerSpec) DeepCopyInto(out *RedisBrokerSpec) { - *out = *in - if in.Redis != nil { - in, out := &in.Redis, &out.Redis - *out = new(Redis) - (*in).DeepCopyInto(*out) - } - in.Broker.DeepCopyInto(&out.Broker) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisBrokerSpec. -func (in *RedisBrokerSpec) DeepCopy() *RedisBrokerSpec { - if in == nil { - return nil - } - out := new(RedisBrokerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RedisBrokerStatus) DeepCopyInto(out *RedisBrokerStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - in.Address.DeepCopyInto(&out.Address) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisBrokerStatus. -func (in *RedisBrokerStatus) DeepCopy() *RedisBrokerStatus { - if in == nil { - return nil - } - out := new(RedisBrokerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RedisConnection) DeepCopyInto(out *RedisConnection) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.ClusterURLs != nil { - in, out := &in.ClusterURLs, &out.ClusterURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Username != nil { - in, out := &in.Username, &out.Username - *out = new(SecretValueFromSource) - (*in).DeepCopyInto(*out) - } - if in.Password != nil { - in, out := &in.Password, &out.Password - *out = new(SecretValueFromSource) - (*in).DeepCopyInto(*out) - } - if in.TLSCACertificate != nil { - in, out := &in.TLSCACertificate, &out.TLSCACertificate - *out = new(SecretValueFromSource) - (*in).DeepCopyInto(*out) - } - if in.TLSCertificate != nil { - in, out := &in.TLSCertificate, &out.TLSCertificate - *out = new(SecretValueFromSource) - (*in).DeepCopyInto(*out) - } - if in.TLSKey != nil { - in, out := &in.TLSKey, &out.TLSKey - *out = new(SecretValueFromSource) - (*in).DeepCopyInto(*out) - } - if in.TLSEnabled != nil { - in, out := &in.TLSEnabled, &out.TLSEnabled - *out = new(bool) - **out = **in - } - if in.TLSSkipVerify != nil { - in, out := &in.TLSSkipVerify, &out.TLSSkipVerify - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisConnection. -func (in *RedisConnection) DeepCopy() *RedisConnection { - if in == nil { - return nil - } - out := new(RedisConnection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretValueFromSource) DeepCopyInto(out *SecretValueFromSource) { - *out = *in - in.SecretKeyRef.DeepCopyInto(&out.SecretKeyRef) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretValueFromSource. -func (in *SecretValueFromSource) DeepCopy() *SecretValueFromSource { - if in == nil { - return nil - } - out := new(SecretValueFromSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Trigger) DeepCopyInto(out *Trigger) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger. -func (in *Trigger) DeepCopy() *Trigger { - if in == nil { - return nil - } - out := new(Trigger) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Trigger) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerBounds) DeepCopyInto(out *TriggerBounds) { - *out = *in - if in.ById != nil { - in, out := &in.ById, &out.ById - *out = new(TriggerBoundsByID) - (*in).DeepCopyInto(*out) - } - if in.ByDate != nil { - in, out := &in.ByDate, &out.ByDate - *out = new(TriggerBoundsByDate) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerBounds. 
-func (in *TriggerBounds) DeepCopy() *TriggerBounds { - if in == nil { - return nil - } - out := new(TriggerBounds) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerBoundsByDate) DeepCopyInto(out *TriggerBoundsByDate) { - *out = *in - if in.Start != nil { - in, out := &in.Start, &out.Start - *out = new(string) - **out = **in - } - if in.End != nil { - in, out := &in.End, &out.End - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerBoundsByDate. -func (in *TriggerBoundsByDate) DeepCopy() *TriggerBoundsByDate { - if in == nil { - return nil - } - out := new(TriggerBoundsByDate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerBoundsByID) DeepCopyInto(out *TriggerBoundsByID) { - *out = *in - if in.Start != nil { - in, out := &in.Start, &out.Start - *out = new(string) - **out = **in - } - if in.End != nil { - in, out := &in.End, &out.End - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerBoundsByID. -func (in *TriggerBoundsByID) DeepCopy() *TriggerBoundsByID { - if in == nil { - return nil - } - out := new(TriggerBoundsByID) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerList) DeepCopyInto(out *TriggerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Trigger, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerList. -func (in *TriggerList) DeepCopy() *TriggerList { - if in == nil { - return nil - } - out := new(TriggerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TriggerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerSpec) DeepCopyInto(out *TriggerSpec) { - *out = *in - in.Broker.DeepCopyInto(&out.Broker) - if in.Filters != nil { - in, out := &in.Filters, &out.Filters - *out = make([]broker.Filter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Target.DeepCopyInto(&out.Target) - if in.Delivery != nil { - in, out := &in.Delivery, &out.Delivery - *out = new(v1.DeliverySpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpec. -func (in *TriggerSpec) DeepCopy() *TriggerSpec { - if in == nil { - return nil - } - out := new(TriggerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TriggerSpecBounded) DeepCopyInto(out *TriggerSpecBounded) { - *out = *in - in.TriggerSpec.DeepCopyInto(&out.TriggerSpec) - if in.Bounds != nil { - in, out := &in.Bounds, &out.Bounds - *out = new(TriggerBounds) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpecBounded. -func (in *TriggerSpecBounded) DeepCopy() *TriggerSpecBounded { - if in == nil { - return nil - } - out := new(TriggerSpecBounded) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - if in.TargetURI != nil { - in, out := &in.TargetURI, &out.TargetURI - *out = new(apis.URL) - (*in).DeepCopyInto(*out) - } - in.DeliveryStatus.DeepCopyInto(&out.DeliveryStatus) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus. -func (in *TriggerStatus) DeepCopy() *TriggerStatus { - if in == nil { - return nil - } - out := new(TriggerStatus) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/brokers/backend/impl/redis/cmd.go b/pkg/brokers/backend/impl/redis/cmd.go deleted file mode 100644 index 81292533..00000000 --- a/pkg/brokers/backend/impl/redis/cmd.go +++ /dev/null @@ -1,54 +0,0 @@ -package redis - -import ( - "fmt" - "strings" -) - -type RedisArgs struct { - Address string `help:"Redis address." env:"ADDRESS" default:"0.0.0.0:6379"` - ClusterAddresses []string `help:"Redis address." env:"CLUSTER_ADDRESSES"` - - Username string `help:"Redis username." env:"USERNAME"` - Password string `help:"Redis password." env:"PASSWORD"` - Database int `help:"Database ordinal at Redis." env:"DATABASE" default:"0"` - TLSEnabled bool `help:"TLS enablement for Redis connection." env:"TLS_ENABLED" default:"false"` - TLSSkipVerify bool `help:"TLS skipping certificate verification." env:"TLS_SKIP_VERIFY" default:"false"` - TLSCertificate string `help:"TLS Certificate to connect to Redis." env:"TLS_CERTIFICATE"` - TLSKey string `help:"TLS Certificate key to connect to Redis." env:"TLS_KEY"` - TLSCACertificate string `help:"CA Certificate to connect to Redis." name:"tls-ca-certificate" env:"TLS_CA_CERTIFICATE"` - - Stream string `help:"Stream name that stores the broker's CloudEvents." env:"STREAM" default:"typhoon"` - Group string `help:"Redis stream consumer group name." env:"GROUP" default:"default"` - // Instance at the Redis stream consumer group. Copied from the InstanceName at the global args. - Instance string `kong:"-"` - - StreamMaxLen int `help:"Limit the number of items in a stream by trimming it. Set to 0 for unlimited." env:"STREAM_MAX_LEN" default:"1000"` - TrackingIDEnabled bool `help:"Enables adding Redis ID as a CloudEvent attribute." env:"TRACKING_ID_ENABLED" default:"false"` -} - -func (ra *RedisArgs) Validate() error { - msg := []string{} - - // Since there is a default value at addresses, we only check that cluster addresses and a value for - // and standalone instance that is different to the default must not be provided. 
- if len(ra.ClusterAddresses) != 0 && - ra.Address != "0.0.0.0:6379" && ra.Address != "" { - msg = append(msg, "Only one of address (standalone) or cluster addresses (cluster) arguments must be provided.") - } - - if ra.TLSCACertificate != "" && ra.TLSSkipVerify { - msg = append(msg, "only one of skip verify or CA certificate can be informed") - } - - if (ra.TLSCertificate != "" || ra.TLSKey != "") && - (ra.TLSCertificate == "" || ra.TLSKey == "") { - msg = append(msg, "TLS authentication requires Certificate and Key to be informed") - } - - if len(msg) == 0 { - return nil - } - - return fmt.Errorf(strings.Join(msg, " ")) -} diff --git a/pkg/brokers/backend/impl/redis/redis.go b/pkg/brokers/backend/impl/redis/redis.go deleted file mode 100644 index d0b53314..00000000 --- a/pkg/brokers/backend/impl/redis/redis.go +++ /dev/null @@ -1,358 +0,0 @@ -package redis - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "strconv" - "strings" - "sync" - "time" - - cloudevents "github.com/cloudevents/sdk-go/v2" - "go.uber.org/zap" - - goredis "github.com/redis/go-redis/v9" - - "github.com/zeiss/typhoon/pkg/brokers/backend" - "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -const ( - // Default starting point for the consumer group. - defaultGroupStartID = "$" - - // Redis key at the message that contains the CloudEvent. - ceKey = "ce" - - // Disconnect timeout - disconnectTimeout = time.Second * 20 - - // Unsubscribe timeout - unsubscribeTimeout = time.Second * 10 -) - -func New(args *RedisArgs, logger *zap.SugaredLogger) backend.Interface { - return &redis{ - args: args, - logger: logger, - disconnecting: false, - subs: make(map[string]subscription), - } -} - -type redis struct { - args *RedisArgs - - client goredis.Cmdable - // Redis' Cmdable does not include the conneciton operation - // functions, we keep track of closing via this field. - clientClose func() error - - // subscription list indexed by the name. - subs map[string]subscription - // Waitgroup that should be used to wait for subscribers - // before disconnecting. - wgSubs sync.WaitGroup - - // disconnecting is set to avoid setting up new subscriptions - // when the broker is shutting down. 
- disconnecting bool - - ctx context.Context - logger *zap.SugaredLogger - mutex sync.Mutex -} - -func (s *redis) Info() *backend.Info { - return &backend.Info{ - Name: "Redis", - } -} - -func (s *redis) Init(ctx context.Context) error { - var tlscfg *tls.Config - if s.args.TLSEnabled { - tlscfg = &tls.Config{ - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: s.args.TLSSkipVerify, - } - - roots := x509.NewCertPool() - if s.args.TLSCACertificate != "" { - if ok := roots.AppendCertsFromPEM([]byte(s.args.TLSCACertificate)); !ok { - return errors.New("not valid CA Cert format") - } - } - - tlscfg.RootCAs = roots - - if s.args.TLSCertificate != "" { - cert, err := tls.X509KeyPair([]byte(s.args.TLSCertificate), []byte(s.args.TLSKey)) - if err != nil { - return fmt.Errorf("TLS key pair should be PEM formatted: %w", err) - } - tlscfg.Certificates = append(tlscfg.Certificates, cert) - } - } - - if len(s.args.ClusterAddresses) != 0 { - s.logger.Info("Cluster client") - clusterclient := goredis.NewClusterClient(&goredis.ClusterOptions{ - Addrs: s.args.ClusterAddresses, - Username: s.args.Username, - Password: s.args.Password, - TLSConfig: tlscfg, - }) - - s.clientClose = clusterclient.Close - s.client = clusterclient - } else { - client := goredis.NewClient(&goredis.Options{ - Addr: s.args.Address, - Username: s.args.Username, - Password: s.args.Password, - DB: s.args.Database, - TLSConfig: tlscfg, - }) - - s.clientClose = client.Close - s.client = client - } - - return s.Probe(ctx) -} - -func (s *redis) Start(ctx context.Context) error { - s.ctx = ctx - <-ctx.Done() - - // This prevents new subscriptions from being setup - s.disconnecting = true - - s.mutex.Lock() - defer s.mutex.Unlock() - - for name := range s.subs { - s.unsubscribe(name) - } - - // wait for all subscriptions to finish - // before returning. - allSubsFinished := make(chan struct{}) - go func() { - defer close(allSubsFinished) - s.wgSubs.Wait() - }() - - select { - case <-allSubsFinished: - // Clean exit. - case <-time.After(disconnectTimeout): - // Timed out, some events have not been delivered. - s.logger.Error(fmt.Sprintf("Disconnection from Redis timed out after %d", disconnectTimeout)) - } - - return s.clientClose() -} - -func (s *redis) Produce(ctx context.Context, event *cloudevents.Event) error { - b, err := event.MarshalJSON() - if err != nil { - return fmt.Errorf("could not serialize CloudEvent: %w", err) - } - - args := &goredis.XAddArgs{ - Stream: s.args.Stream, - Values: map[string]interface{}{ceKey: b}, - } - - if s.args.StreamMaxLen != 0 { - args.MaxLen = int64(s.args.StreamMaxLen) - args.Approx = true - } - - res := s.client.XAdd(ctx, args) - - id, err := res.Result() - if err != nil { - return fmt.Errorf("could not produce CloudEvent to backend: %w", err) - } - - s.logger.Debug(fmt.Sprintf("CloudEvent %s/%s produced to the backend as %s", - event.Context.GetSource(), - event.Context.GetID(), - id)) - - return nil -} - -// SubscribeBounded is a variant of the Subscribe function that supports bounded subscriptions. -// It adds the option of using a startId and endId for the replay feature. 
-func (s *redis) Subscribe(name string, bounds *broker.TriggerBounds, ccb backend.ConsumerDispatcher, scb backend.SubscriptionStatusChange) error { - s.mutex.Lock() - defer s.mutex.Unlock() - - // avoid subscriptions if disconnection is going on - if s.disconnecting { - return errors.New("cannot create new subscriptions while disconnecting") - } - - if _, ok := s.subs[name]; ok { - return fmt.Errorf("subscription for %q alredy exists", name) - } - - startID, endID, err := boundsResolver(bounds) - if err != nil { - return fmt.Errorf("subscription bounds could not be resolved: %w", err) - } - - var exceedBoundCheck exceedBounds - if endID != "" { - exceedBoundCheck = newExceedBounds(endID) - } - - // Create the consumer group for this subscription. - group := s.args.Group + "." + name - res := s.client.XGroupCreateMkStream(s.ctx, s.args.Stream, group, startID) - _, err = res.Result() - if err != nil { - // Ignore errors when the group already exists. - if !strings.HasPrefix(err.Error(), "BUSYGROUP") { - return err - } - s.logger.Debug("Consumer group already exists", zap.String("group", group)) - } - - // We don't use the parent context but create a new one so that we can control - // how subscriptions are finished by calling cancel at our will, either when the - // global context is called, or when unsubscribing. - ctx, cancel := context.WithCancel(context.Background()) - - subs := subscription{ - instance: s.args.Instance, - stream: s.args.Stream, - name: name, - group: group, - checkBoundsExceeded: exceedBoundCheck, - - trackingEnabled: s.args.TrackingIDEnabled, - - // caller's callback for dispatching events from Redis. - ccbDispatch: ccb, - - // caller's callback for setting subscription status. - scb: scb, - - // cancel function let us control when we want to exit the subscription loop. - ctx: ctx, - cancel: cancel, - // stoppedCh signals when a subscription has completely finished. - stoppedCh: make(chan struct{}), - - client: s.client, - logger: s.logger, - } - - s.subs[name] = subs - s.wgSubs.Add(1) - subs.start() - - return nil -} - -func (s *redis) Unsubscribe(name string) { - s.mutex.Lock() - defer s.mutex.Unlock() - s.unsubscribe(name) -} - -// unsubscribe is not thread safe, caller should acquire -// the object's lock. -func (s *redis) unsubscribe(name string) { - sub, ok := s.subs[name] - if !ok { - s.logger.Infow("Unsubscribe action was not needed since the subscription did not exist", - zap.String("name", name)) - return - } - - // Finish the subscription's context. - sub.cancel() - - // Wait for the subscription to finish - select { - case <-sub.stoppedCh: - s.logger.Debugw("Graceful shutdown of subscription", zap.String("name", name)) - - // Clean exit. - case <-time.After(unsubscribeTimeout): - // Timed out, some events have not been delivered. 
- s.logger.Errorw(fmt.Sprintf("Unsubscribing from Redis timed out after %d", unsubscribeTimeout), - zap.String("name", name)) - } - - delete(s.subs, name) - s.wgSubs.Done() -} - -func (s *redis) Probe(ctx context.Context) error { - res := s.client.ClientID(ctx) - id, err := res.Result() - - if err == nil { - s.logger.Debugw("Probing redis", zap.Int64("client_id", id)) - return nil - } - - s.logger.Info("Probing redis with CLIENT ID command failed, trying PING command", zap.Error(err)) - - info := s.client.Ping(ctx) - result, err := info.Result() - - if err == nil { - s.logger.Debugw("Probing redis with PING command", zap.String("info", result)) - return nil - } - - // Add some context since Redis client sometimes is not clear about what failed. - return fmt.Errorf("failed probing Redis, using PING: %w", err) -} - -func boundsResolver(bounds *broker.TriggerBounds) (startID, endID string, e error) { - startID = defaultGroupStartID - - if bounds == nil { - return - } - - // Process date bounds. - if start := bounds.ByDate.GetStart(); start != "" { - st, err := time.Parse(time.RFC3339Nano, start) - if err != nil { - e = fmt.Errorf("parsing bounds start date: %w", err) - return - } - startID = strconv.FormatInt(st.UnixMilli(), 10) - } - if end := bounds.ByDate.GetEnd(); end != "" { - en, err := time.Parse(time.RFC3339, end) - if err != nil { - e = fmt.Errorf("parsing bounds end date: %w", err) - return - } - endID = strconv.FormatInt(en.UnixMilli(), 10) - } - - // Process ID bounds. - if start := bounds.ByID.GetStart(); start != "" { - startID = start - } - if end := bounds.ByID.GetEnd(); end != "" { - endID = end - } - - return -} diff --git a/pkg/brokers/backend/impl/redis/subscription.go b/pkg/brokers/backend/impl/redis/subscription.go deleted file mode 100644 index 6b1eac2e..00000000 --- a/pkg/brokers/backend/impl/redis/subscription.go +++ /dev/null @@ -1,201 +0,0 @@ -package redis - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - cloudevents "github.com/cloudevents/sdk-go/v2" - goredis "github.com/redis/go-redis/v9" - "go.uber.org/zap" - - "github.com/zeiss/typhoon/pkg/brokers/backend" - "github.com/zeiss/typhoon/pkg/brokers/status" -) - -const ( - BackendIDAttribute = "typhoonbackendid" -) - -type exceedBounds func(id string) bool - -func newExceedBounds(offset string) exceedBounds { - return func(id string) bool { - // Use the greater or equal here to make it - // exclusive on bounds. When the ID matches the - // one configured at the upper bound, the message - // wont be produced. - return id >= offset - } -} - -type subscription struct { - instance string - stream string - name string - group string - checkBoundsExceeded exceedBounds - - trackingEnabled bool - - // caller's callback for dispatching events from Redis. - ccbDispatch backend.ConsumerDispatcher - - // caller's callback for subscription status changes - scb backend.SubscriptionStatusChange - - // cancel function let us control when the subscription loop should exit. - ctx context.Context - cancel context.CancelFunc - // stoppedCh signals when a subscription has completely finished. - stoppedCh chan struct{} - - client goredis.Cmdable - logger *zap.SugaredLogger -} - -func (s *subscription) start() { - s.logger.Infow("Starting Redis subscription", - zap.String("group", s.group), - zap.String("instance", s.instance), - zap.String("stream", s.stream)) - // Start reading all pending messages - id := "0" - - // When the context is signaled mark an exitLoop flag to exit - // the worker routine gracefuly. 
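
For illustration only, outside the removed sources above: a minimal sketch of how the replay bounds handled by boundsResolver translate into Redis stream IDs. An RFC3339 timestamp is reduced to its milliseconds-since-epoch value, which is the "<ms>" prefix of a stream entry ID ("<ms>-<seq>"); that string is what XGroupCreateMkStream receives as the starting position and what newExceedBounds compares entry IDs against. The date used below is an arbitrary example.

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// Hypothetical replay bound; any RFC3339 timestamp works here.
	start, err := time.Parse(time.RFC3339Nano, "2024-01-02T15:04:05Z")
	if err != nil {
		panic(err)
	}

	// The same conversion boundsResolver applies: milliseconds since epoch
	// become the "<ms>" prefix of a Redis stream ID ("<ms>-<seq>").
	startID := strconv.FormatInt(start.UnixMilli(), 10)

	fmt.Println(startID) // "1704207845000", usable as the consumer group start position
}
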
- exitLoop := false - go func() { - <-s.ctx.Done() - - s.logger.Debugw("Waiting for last XReadGroup operation to finish before exiting subscription", - zap.String("group", s.group), - zap.String("instance", s.instance), - zap.String("stream", s.stream)) - exitLoop = true - }() - - go func() { - for { - // Check at the begining of each iteration if the exit loop flag has - // been signaled due to done context or because the endDate has been reached. - if exitLoop { - break - } - - // Although this call is blocking it will yield when the context is done, - // the exit loop flag above will be triggered almost immediately if no - // data has been read. - streams, err := s.client.XReadGroup(s.ctx, &goredis.XReadGroupArgs{ - Group: s.group, - Consumer: s.instance, - Streams: []string{s.stream, id}, - Count: 1, - // Setting block low since cancelling the context - // does not force the read to finish, making the process slow - // to exit. - Block: 3 * time.Second, - NoAck: false, - }).Result() - if err != nil { - // Ignore errors when the blocking period ends without - // receiving any event, and errors when the context is - // canceled - if !errors.Is(err, goredis.Nil) && - !strings.HasSuffix(err.Error(), "i/o timeout") && - err.Error() != "context canceled" { - s.logger.Errorw("Error reading CloudEvents from consumer group", zap.String("group", s.group), zap.Error(err)) - } - continue - } - - if len(streams) != 1 { - s.logger.Errorw("unexpected number of streams read", zap.Any("streams", streams)) - continue - } - - // If we are processing pending messages from Redis and we reach - // EOF, switch to reading new messages. - if len(streams[0].Messages) == 0 && id != ">" { - id = ">" - } - - for _, msg := range streams[0].Messages { - ce := &cloudevents.Event{} - for k, v := range msg.Values { - if k != ceKey { - s.logger.Debug(fmt.Sprintf("Ignoring non expected key at message from backend: %s", k)) - continue - } - - if err = ce.UnmarshalJSON([]byte(v.(string))); err != nil { - s.logger.Errorw("Could not unmarshal CloudEvent from Redis", zap.Error(err)) - continue - } - } - - // If there was no valid CE in the message ACK so that we do not receive it again. - if err = ce.Validate(); err != nil { - s.logger.Warn(fmt.Sprintf("Removing non CloudEvent message from backend: %s", msg.ID)) - if err = s.ack(msg.ID); err != nil { - s.logger.Errorw(fmt.Sprintf("could not ACK the Redis message %s containing a non valid CloudEvent", id), - zap.Error(err)) - } - - continue - } - - // If an end date has been specified, compare the current message ID - // with the end date. If the message ID is newer than the end date, - // exit the loop. - if s.checkBoundsExceeded != nil { - if exitLoop = s.checkBoundsExceeded(msg.ID); exitLoop { - s.scb(&status.SubscriptionStatus{ - Status: status.SubscriptionStatusComplete, - }) - break - } - } - - if s.trackingEnabled { - if err = ce.Context.SetExtension(BackendIDAttribute, msg.ID); err != nil { - s.logger.Errorw(fmt.Sprintf("could not set %s attributes for the Redis message %s. Tracking will not be possible.", BackendIDAttribute, msg.ID), - zap.Error(err)) - } - } - - go func(msgID string) { - s.ccbDispatch(ce) - if err := s.ack(msgID); err != nil { - s.logger.Errorw(fmt.Sprintf("could not ACK the Redis message %s containing CloudEvent %s", msgID, ce.Context.GetID()), - zap.Error(err)) - } - }(msg.ID) - - // If we are processing pending messages the ACK might take a - // while to be sent. 
We need to set the message ID so that the - // next requested element is not any of the pending being processed. - if id != ">" { - id = msg.ID - } - } - } - - s.logger.Debugw("Exited Redis subscription", - zap.String("group", s.group), - zap.String("instance", s.instance), - zap.String("stream", s.stream)) - - // Close stoppedCh to signal external viewers that processing for this - // subscription is no longer running. - close(s.stoppedCh) - }() -} - -func (s *subscription) ack(id string) error { - res := s.client.XAck(s.ctx, s.stream, s.group, id) - _, err := res.Result() - return err -} diff --git a/pkg/brokers/backend/interface.go b/pkg/brokers/backend/interface.go deleted file mode 100644 index 5f029aae..00000000 --- a/pkg/brokers/backend/interface.go +++ /dev/null @@ -1,68 +0,0 @@ -package backend - -import ( - "context" - - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/zeiss/typhoon/pkg/brokers/config/broker" - "github.com/zeiss/typhoon/pkg/brokers/status" -) - -type Info struct { - // Name of the backend implementation - Name string -} - -// ConsumerDispatcher receives CloudEvents to be delivered to subscribers. -// The consumer dispatcher must process the event for all subscriptions, -// including retries and dead leter queues. -// When the function finishes executing the backend will consider -// the event processed and will make sure it is not re-delivered. -type ConsumerDispatcher func(event *cloudevents.Event) - -type SubscriptionStatusChange func(*status.SubscriptionStatus) - -type EventProducer interface { - // Ingest a new CloudEvents at the backend. - Produce(context.Context, *cloudevents.Event) error -} - -type SubscribeOption func(Subscribable) - -type Subscribable interface { - // Subscribe is a method that sets up a reader that will retrieve - // events from the backend and pass them to the consumer dispatcher. - // When the consumer dispatcher returns, the message is marked as - // processed and won't be delivered anymore. - Subscribe(name string, bounds *broker.TriggerBounds, ccb ConsumerDispatcher, scb SubscriptionStatusChange) error - - // Unsubscribe is a method that removes a subscription referencing - // it by name, returning when all pending (already read) messages - // have been dispatched. - Unsubscribe(name string) -} - -type Interface interface { - EventProducer - Subscribable - - // Info returns information about the backend implementation. - Info() *Info - - // Init connects and does initialization tasks at the backend. - // It must be called before using other methods (but Info) - // and might perform initialization tasks like creating structures - // or migrations. - Init(ctx context.Context) error - - // Start is a blocking method that read events from the backend - // and pass them to the subscriber's consumer dispatcher. When the consumer - // dispatcher returns, the message is marked as processed and - // won't be delivered anymore. - // When the context is done all subscribers are finished and the - // method exists. - Start(ctx context.Context) error - - // Probe checks the overall status of the backend implementation. 
- Probe(context.Context) error -} diff --git a/pkg/brokers/broker/broker.go b/pkg/brokers/broker/broker.go deleted file mode 100644 index ab3c12ea..00000000 --- a/pkg/brokers/broker/broker.go +++ /dev/null @@ -1,375 +0,0 @@ -package broker - -import ( - "context" - "fmt" - "os" - "os/signal" - "path/filepath" - "syscall" - - "go.uber.org/zap" - "golang.org/x/sync/errgroup" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - - "github.com/zeiss/typhoon/pkg/brokers/backend" - "github.com/zeiss/typhoon/pkg/brokers/broker/cmd" - "github.com/zeiss/typhoon/pkg/brokers/common/fs" - "github.com/zeiss/typhoon/pkg/brokers/common/kubernetes/controller" - kstatus "github.com/zeiss/typhoon/pkg/brokers/common/kubernetes/status" - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" - cfgbpoller "github.com/zeiss/typhoon/pkg/brokers/config/broker/poller" - cfgbwatcher "github.com/zeiss/typhoon/pkg/brokers/config/broker/watcher" - cfgopoller "github.com/zeiss/typhoon/pkg/brokers/config/observability/poller" - cfgowatcher "github.com/zeiss/typhoon/pkg/brokers/config/observability/watcher" - "github.com/zeiss/typhoon/pkg/brokers/ingest" - "github.com/zeiss/typhoon/pkg/brokers/ingest/metrics" - "github.com/zeiss/typhoon/pkg/brokers/status" - "github.com/zeiss/typhoon/pkg/brokers/subscriptions" -) - -type Status string - -const ( - StatusStopped Status = "stopped" - StatusStarting Status = "starting" - StatusRunning Status = "running" - StatusStopping Status = "stopping" -) - -type Instance struct { - backend backend.Interface - ingest *ingest.Instance - subscription *subscriptions.Manager - bcw *cfgbwatcher.Watcher - ocw *cfgowatcher.Watcher - bcp *cfgbpoller.Poller - ocp *cfgopoller.Poller - km *controller.Manager - staticConfig *cfgbroker.Config - statusManager status.Manager - status Status - - logger *zap.SugaredLogger -} - -func NewInstance(globals *cmd.Globals, b backend.Interface) (*Instance, error) { - globals.Logger.Debug("Creating subscription manager") - - // Create status manager to be injected into ingest and subscription manager. - statusManager := status.NewManager( - /* Cached status expiry. When reached a re-write of the ConfigMap will be forced */ - globals.StatusForcePeriod, - /* Resync period */ - globals.StatusCheckPeriod, - globals.Logger.Named("status"), - ) - - if globals.KubernetesStatusConfigmapName != "" { - kc, err := client.New(config.GetConfigOrDie(), client.Options{}) - if err != nil { - return nil, err - } - - kbackend := kstatus.NewKubernetesBackend( - - // ConfigMap identification - globals.KubernetesStatusConfigmapName, - globals.KubernetesNamespace, - globals.KubernetesStatusConfigmapKey, - // Broker instance - globals.BrokerName, - // When to delete other instances that might have been deleted - // from the ConfigMap status report. - // We will use 3 times the force period, which means the status will - // be removed for any instance that fails to update at least 2 times - // and up to 3 times their status at the ConfigMap - globals.StatusForcePeriod*3, - kc, - globals.Logger.Named("kubestatus"), - ) - - statusManager.RegisterBackendStatusWriters(kbackend) - } - - // Create subscription manager. - sm, err := subscriptions.New(globals.Context, globals.Logger.Named("subs"), b, statusManager) - if err != nil { - return nil, err - } - - globals.Logger.Debug("Creating HTTP ingest server") - // Create metrics reporter. 
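
For illustration only, not part of the removed sources: a hypothetical in-memory implementation of the backend.Interface defined in pkg/brokers/backend/interface.go above. It shows the minimal contract a backend must satisfy (Info, Init, Start, Probe, Produce, Subscribe, Unsubscribe) and deliberately ignores bounds, status reporting and redelivery.

package memory

import (
	"context"
	"sync"

	cloudevents "github.com/cloudevents/sdk-go/v2"

	"github.com/zeiss/typhoon/pkg/brokers/backend"
	"github.com/zeiss/typhoon/pkg/brokers/config/broker"
)

type memoryBackend struct {
	mu   sync.RWMutex
	subs map[string]backend.ConsumerDispatcher
}

// New returns a toy backend that dispatches produced events to all subscribers.
func New() backend.Interface {
	return &memoryBackend{subs: map[string]backend.ConsumerDispatcher{}}
}

func (m *memoryBackend) Info() *backend.Info             { return &backend.Info{Name: "Memory"} }
func (m *memoryBackend) Init(ctx context.Context) error  { return nil }
func (m *memoryBackend) Probe(ctx context.Context) error { return nil }

// Start blocks until the context is done, mirroring the Redis backend behaviour.
func (m *memoryBackend) Start(ctx context.Context) error {
	<-ctx.Done()
	return nil
}

// Produce dispatches the event synchronously to every registered subscription.
func (m *memoryBackend) Produce(ctx context.Context, event *cloudevents.Event) error {
	m.mu.RLock()
	defer m.mu.RUnlock()
	for _, dispatch := range m.subs {
		dispatch(event)
	}
	return nil
}

func (m *memoryBackend) Subscribe(name string, _ *broker.TriggerBounds, ccb backend.ConsumerDispatcher, _ backend.SubscriptionStatusChange) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.subs[name] = ccb
	return nil
}

func (m *memoryBackend) Unsubscribe(name string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.subs, name)
}
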
- ir, err := metrics.NewReporter(globals.Context) - if err != nil { - return nil, err - } - - i := ingest.NewInstance(ir, globals.Logger.Named("ingest"), - ingest.InstanceWithPort(globals.Port), - ingest.InstanceWithStatusManager(statusManager), - ) - - globals.Logger.Debug("Creating broker instance") - broker := &Instance{ - backend: b, - ingest: i, - subscription: sm, - statusManager: statusManager, - status: StatusStopped, - - logger: globals.Logger.Named("broker"), - } - - switch globals.ConfigMethod { - - case cmd.ConfigMethodFileWatcher: - // The ConfigWatcher will read the configfile and call registered - // callbacks upon start and everytime the configuration file - // is updated. - cfw, err := fs.NewCachedFileWatcher(globals.Logger.Named("fswatch")) - if err != nil { - return nil, err - } - - configPath, err := filepath.Abs(globals.BrokerConfigPath) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", globals.BrokerConfigPath, err) - } - - globals.Logger.Debugw("Creating watcher for broker configuration", zap.String("file", configPath)) - bcfgw, err := cfgbwatcher.NewWatcher(cfw, configPath, globals.Logger.Named("cgfwatch")) - if err != nil { - return nil, fmt.Errorf("error adding broker watcher for %q: %w", configPath, err) - } - - broker.bcw = bcfgw - - if globals.ObservabilityConfigPath != "" { - var ocfgw *cfgowatcher.Watcher - if globals.ObservabilityConfigPath != "" { - obsCfgPath, err := filepath.Abs(globals.ObservabilityConfigPath) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", globals.ObservabilityConfigPath, err) - } - - globals.Logger.Debugw("Creating watcher for observability configuration", zap.String("file", obsCfgPath)) - ocfgw, err = cfgowatcher.NewWatcher(cfw, obsCfgPath, globals.Logger.Named("ocgfwatch")) - if err != nil { - return nil, fmt.Errorf("error adding observability watcher for %q: %w", globals.ObservabilityConfigPath, err) - } - - ocfgw.AddCallback(globals.UpdateLogLevel) - ocfgw.AddCallback(globals.UpdateMetricsOptions) - broker.ocw = ocfgw - } - } - - case cmd.ConfigMethodKubernetesSecretMapWatcher: - km, err := controller.NewManager(globals.KubernetesNamespace, globals.Logger.Named("controller")) - if err != nil { - return nil, fmt.Errorf("error creating kubernetes controller manager: %w", err) - } - - if err = km.AddSecretControllerForBrokerConfig( - globals.KubernetesBrokerConfigSecretName, - globals.KubernetesBrokerConfigSecretKey); err != nil { - return nil, fmt.Errorf("error adding broker Secret reconciler to controller: %w", err) - } - - km.AddSecretCallbackForBrokerConfig(i.UpdateFromConfig) - km.AddSecretCallbackForBrokerConfig(sm.UpdateFromConfig) - - if globals.KubernetesObservabilityConfigMapName != "" { - if err = km.AddConfigMapControllerForObservability(globals.KubernetesObservabilityConfigMapName); err != nil { - return nil, fmt.Errorf("error adding observability ConfigMap reconciler to controller: %w", err) - } - km.AddConfigMapCallbackForObservabilityConfig(globals.UpdateLogLevel) - km.AddConfigMapCallbackForObservabilityConfig(globals.UpdateMetricsOptions) - } - - broker.km = km - - case cmd.ConfigMethodFilePoller: - p, err := fs.NewPoller(globals.PollingPeriod, globals.Logger.Named("poller")) - if err != nil { - return nil, fmt.Errorf("error creating file poller: %w", err) - } - - configPath, err := filepath.Abs(globals.BrokerConfigPath) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", globals.BrokerConfigPath, err) - } - - 
globals.Logger.Debugw("Creating poller for broker configuration", zap.String("file", configPath)) - bcfgp, err := cfgbpoller.NewPoller(p, configPath, globals.Logger.Named("cfgpoller")) - if err != nil { - return nil, fmt.Errorf("error adding broker poller for %q: %w", configPath, err) - } - - broker.bcp = bcfgp - - if globals.ObservabilityConfigPath != "" { - obsCfgPath, err := filepath.Abs(globals.ObservabilityConfigPath) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", globals.ObservabilityConfigPath, err) - } - - globals.Logger.Debugw("Creating poller for observability configuration", zap.String("file", obsCfgPath)) - ocfgp, err := cfgopoller.NewPoller(p, obsCfgPath, globals.Logger.Named("ocfgpoller")) - if err != nil { - return nil, fmt.Errorf("error adding observability poller for %q: %w", obsCfgPath, err) - } - - ocfgp.AddCallback(globals.UpdateLogLevel) - ocfgp.AddCallback(globals.UpdateMetricsOptions) - - broker.ocp = ocfgp - } - - case cmd.ConfigMethodInline: - // Observability options were already set at globals initialize. We - // only need to set ingest and subscription config here. - - cfg, err := cfgbroker.Parse(globals.BrokerConfig) - if err != nil { - return nil, fmt.Errorf("error parsing inline broker configuration: %w", err) - } - broker.staticConfig = cfg - } - - return broker, nil -} - -func (i *Instance) Start(inctx context.Context) error { - i.logger.Debug("Starting broker instance") - i.status = StatusStarting - i.ingest.RegisterProbeHandler(i.ProbeHandler) - - sigctx, stop := signal.NotifyContext(inctx, os.Interrupt, syscall.SIGTERM) - defer func() { - stop() - i.status = StatusStopped - }() - - grp, ctx := errgroup.WithContext(sigctx) - go func() { - <-ctx.Done() - // In case we receive the context done signal but the - // status was already set to Stopped. - if i.status != StatusStopped { - i.status = StatusStopping - } - }() - - // Launch status manager - i.logger.Debug("Starting status manager") - grp.Go(func() error { - i.statusManager.Start(ctx) - return nil - }) - - // Initialization will create structures, execute migrations - // and claim non processed messages from the backend. - i.logger.Debug("Initializing backend") - err := i.backend.Init(ctx) - if err != nil { - return fmt.Errorf("could not initialize backend: %w", err) - } - - // Start is a blocking function that will read messages from the backend - // implementation and send them to the subscription manager dispatcher. - // When the dispatcher returns the message is marked as processed. - i.logger.Debug("Starting backend routine") - grp.Go(func() error { - return i.backend.Start(ctx) - }) - - // Setup broker config file watchers only if configured. - if i.bcw != nil { - // ConfigWatcher will callback reconfigurations for: - // - Ingest: if authentication parameters are updated. - // - Subscription manager: if triggers configurations changes. - i.logger.Debug("Adding config watcher callbacks") - i.bcw.AddCallback(i.ingest.UpdateFromConfig) - i.bcw.AddCallback(i.subscription.UpdateFromConfig) - - // Start the configuration watcher for brokers. - // There is no need to add it to the wait group - // since it cleanly exits when context is done. - i.logger.Debug("Starting broker configuration watcher") - if err = i.bcw.Start(ctx); err != nil { - return fmt.Errorf("could not start broker configuration watcher: %w", err) - } - } - - // Start observability config file watchers only if configured. 
- if i.ocw != nil { - i.logger.Debug("Starting observability configuration watcher") - if err = i.ocw.Start(ctx); err != nil { - return fmt.Errorf("could not start observability configuration watcher: %w", err) - } - } - - if i.bcp != nil { - i.logger.Debug("Adding config poller callbacks") - i.bcp.AddCallback(i.ingest.UpdateFromConfig) - i.bcp.AddCallback(i.subscription.UpdateFromConfig) - - // Start the configuration poller for brokers. - // There is no need to add it to the wait group - // since it cleanly exits when context is done. - i.logger.Debug("Starting broker configuration poller") - if err = i.bcp.Start(ctx); err != nil { - return fmt.Errorf("could not start broker configuration poller: %w", err) - } - - } - - // Start observability config file pollers only if configured. - if i.ocp != nil { - i.logger.Debug("Starting observability configuration poller") - if err = i.ocp.Start(ctx); err != nil { - return fmt.Errorf("could not start observability configuration poller: %w", err) - } - } - - // Start controller only if kubernetes informers are configured. - if i.km != nil { - grp.Go(func() error { - err := i.km.Start(ctx) - return err - }) - } - - // Static config is configured once when starting. - if i.staticConfig != nil { - i.ingest.UpdateFromConfig(i.staticConfig) - i.subscription.UpdateFromConfig(i.staticConfig) - } - - // Register producer function for received events at ingest. - i.ingest.RegisterCloudEventHandler(i.backend.Produce) - - // TODO register probes at ingest - - // Start the server that ingests CloudEvents. - grp.Go(func() error { - err := i.ingest.Start(ctx) - return err - }) - - i.status = StatusRunning - - return grp.Wait() -} - -func (i *Instance) GetStatus() Status { - return i.status -} - -func (i *Instance) ProbeHandler() error { - // TODO check each service - return nil -} diff --git a/pkg/brokers/broker/cmd/globals.go b/pkg/brokers/broker/cmd/globals.go deleted file mode 100644 index 5e371cc7..00000000 --- a/pkg/brokers/broker/cmd/globals.go +++ /dev/null @@ -1,373 +0,0 @@ -package cmd - -import ( - "context" - "encoding/json" - "fmt" - "log" - "strings" - "time" - - "github.com/rickb777/date/period" - "go.uber.org/automaxprocs/maxprocs" - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - - knmetrics "knative.dev/pkg/metrics" - - "github.com/zeiss/typhoon/pkg/brokers/common/metrics" - "github.com/zeiss/typhoon/pkg/brokers/config/observability" -) - -const ( - metricsComponent = "broker" - - defaultBrokerConfigPath = "/etc/typhoon/broker.conf" -) - -type ConfigMethod int - -const ( - ConfigMethodUnknown = iota - ConfigMethodFileWatcher - ConfigMethodFilePoller - ConfigMethodKubernetesSecretMapWatcher - ConfigMethodInline -) - -type Globals struct { - BrokerConfigPath string `help:"Path to broker configuration file." env:"BROKER_CONFIG_PATH" default:"/etc/typhoon/broker.conf"` - ObservabilityConfigPath string `help:"Path to observability configuration file." env:"OBSERVABILITY_CONFIG_PATH"` - Port int `help:"HTTP Port to listen for CloudEvents." env:"PORT" default:"8080"` - BrokerName string `help:"Broker instance name. When running at Kubernetes should be set to RedisBroker name" env:"BROKER_NAME" default:"${hostname}"` - - // Config Polling is an alternative to the default file watcher for config files. 
- ConfigPollingPeriod string `help:"Period for polling the configuration files using ISO8601. A zero duration disables configuration by polling." env:"CONFIG_POLLING_PERIOD" default:"PT0S"` - - // Inline Configuration - BrokerConfig string `help:"JSON representation of broker configuration." env:"BROKER_CONFIG"` - ObservabilityConfig string `help:"JSON representation of observability configuration." env:"OBSERVABILITY_CONFIG"` - - // Kubernetes parameters - KubernetesNamespace string `help:"Namespace where the broker is running." env:"KUBERNETES_NAMESPACE"` - KubernetesBrokerConfigSecretName string `help:"Secret object name that contains the broker configuration." env:"KUBERNETES_BROKER_CONFIG_SECRET_NAME"` - KubernetesBrokerConfigSecretKey string `help:"Secret object key that contains the broker configuration." env:"KUBERNETES_BROKER_CONFIG_SECRET_KEY"` - KubernetesObservabilityConfigMapName string `help:"ConfigMap object name that contains the observability configuration." env:"KUBERNETES_OBSERVABILITY_CONFIGMAP_NAME"` - KubernetesStatusConfigmapName string `help:"ConfigMap object name where the broker instance should write its status." env:"KUBERNETES_STATUS_CONFIGMAP_NAME"` - KubernetesStatusConfigmapKey string `help:"ConfigMap object key where the broker should write its status." env:"KUBERNETES_STATUS_CONFIGMAP_KEY" default:"status"` - StatusReporterResyncCheckPeriod string `help:"Period for running status checks for pending changes, using ISO8601." env:"STATUS_REPORTER_RESYNC_CHECK_PERIOD" default:"PT10S"` - StatusReporterResyncForcePeriod string `help:"Period for running status resync cycles that force status writes, using ISO8601." env:"STATUS_REPORTER_RESYNC_FORCE_PERIOD" default:"PT1M"` - - ObservabilityMetricsDomain string `help:"Domain to be used for some metrics reporters." env:"OBSERVABILITY_METRICS_DOMAIN" default:"typhoon.zeiss.com/eventing"` - - Context context.Context `kong:"-"` - Logger *zap.SugaredLogger `kong:"-"` - LogLevel zap.AtomicLevel `kong:"-"` - PollingPeriod time.Duration `kong:"-"` - ConfigMethod ConfigMethod `kong:"-"` - StatusCheckPeriod time.Duration `kong:"-"` - StatusForcePeriod time.Duration `kong:"-"` -} - -func (s *Globals) Validate() error { - msg := []string{} - - // We need to sort out if ConfigPollingPeriod is not 0 before - // finding out the config method - if s.ConfigPollingPeriod != "" { - p, err := period.Parse(s.ConfigPollingPeriod) - if err != nil { - // try to parse go duration for backwards compatibility. - gd, gderr := time.ParseDuration(s.ConfigPollingPeriod) - if gderr != nil { - // go time parsing failed, we assume that the incoming parameter was ISO8601 - // for the error message. - msg = append(msg, fmt.Sprintf("Config polling period is not an ISO8601 duration: %v", err)) - } else { - // configure using go time - // TODO cast a warning. 
- s.PollingPeriod = gd - } - } else { - s.PollingPeriod = p.DurationApprox() - } - } - - // Broker config must be configured - if s.BrokerConfigPath == "" && - (s.KubernetesBrokerConfigSecretName == "" || s.KubernetesBrokerConfigSecretKey == "") && - s.BrokerConfig == "" { - msg = append(msg, "Broker configuration path, Kubernetes Secret, or inline configuration must be informed.") - } - - if s.KubernetesNamespace == "" && - (s.KubernetesStatusConfigmapName != "" || - s.KubernetesBrokerConfigSecretName != "" || - s.KubernetesBrokerConfigSecretKey != "") { - msg = append(msg, "Kubernetes namespace must be informed.") - } - - switch { - case s.KubernetesBrokerConfigSecretName != "" || s.KubernetesBrokerConfigSecretKey != "": - s.ConfigMethod = ConfigMethodKubernetesSecretMapWatcher - - if s.KubernetesBrokerConfigSecretName == "" || s.KubernetesBrokerConfigSecretKey == "" { - msg = append(msg, "Broker configuration for Kubernetes must inform both secret name and key.") - } - - // Local file config path should be either empty or the default, which is considered empty - // when Kubernetes configuration is informed. - if s.BrokerConfigPath != "" && s.BrokerConfigPath != defaultBrokerConfigPath { - msg = append(msg, "Cannot use Broker file for configuration when a Kubernetes Secret is used for the broker.") - } - - // Local file config path should be either empty or the default, which is considered empty - // when Kubernetes configuration is informed. - if s.ObservabilityConfigPath != "" { - msg = append(msg, "Local file observability configuration cannot be used along with the Kubernetes Secret configuration.") - } - - if s.BrokerConfig != "" || s.ObservabilityConfig != "" { - msg = append(msg, "Inline config cannot be used along with the Kubernetes Secret configuration.") - } - - case s.BrokerConfig != "": - // Local file config path should be either empty or the default, which is considered empty - // when Kubernetes configuration is informed. - if s.BrokerConfigPath != "" && s.BrokerConfigPath != defaultBrokerConfigPath { - msg = append(msg, "Inline config cannot be used along with local file configuration.") - break - } - - s.ConfigMethod = ConfigMethodInline - - case s.BrokerConfigPath != "": - if s.PollingPeriod == 0 { - s.ConfigMethod = ConfigMethodFileWatcher - } else { - s.ConfigMethod = ConfigMethodFilePoller - } - - if s.KubernetesBrokerConfigSecretName != "" || s.KubernetesBrokerConfigSecretKey != "" { - msg = append(msg, "Cannot inform Broker Secret and File for broker configuration.") - } - - if s.KubernetesObservabilityConfigMapName != "" { - msg = append(msg, "Cannot inform Observability ConfigMap when a file is used for broker configuration.") - } - - if s.KubernetesStatusConfigmapName == "" && s.KubernetesNamespace != "" { - msg = append(msg, "Kubernetes namespace must not be informed when local File configuration is used.") - } - - if s.BrokerConfig != "" || s.ObservabilityConfig != "" { - msg = append(msg, "Inline config cannot be used along with local file configuration.") - } - - default: - msg = append(msg, "Either Kubernetes Secret or local file configuration must be informed.") - } - - // parse durations for resync and expired cache. 
- p, err := period.Parse(s.StatusReporterResyncCheckPeriod) - if err != nil { - msg = append(msg, fmt.Sprintf("status resync check period is not an ISO8601 duration: %v", err)) - } else { - s.StatusCheckPeriod = p.DurationApprox() - } - - p, err = period.Parse(s.StatusReporterResyncForcePeriod) - if err != nil { - msg = append(msg, fmt.Sprintf("status resync force period is not an ISO8601 duration: %v", err)) - } else { - s.StatusForcePeriod = p.DurationApprox() - } - - if len(msg) != 0 { - s.ConfigMethod = ConfigMethodUnknown - return fmt.Errorf(strings.Join(msg, " ")) - } - - return nil -} - -func (s *Globals) Initialize() error { - var cfg *observability.Config - var l *zap.Logger - defaultConfigApplied := false - var err error - - undo, err := maxprocs.Set() - if err != nil { - return fmt.Errorf("could not match available CPUs to processes %w", err) - } - defer undo() - - switch { - case s.ObservabilityConfigPath != "": - // Read before starting the watcher to use it with the - // start routines. - cfg, err = observability.ReadFromFile(s.ObservabilityConfigPath) - if err != nil || cfg.LoggerCfg == nil { - log.Printf("Could not appliying provided config: %v", err) - defaultConfigApplied = true - } - - case s.ObservabilityConfig != "": - data := map[string]string{} - err = json.Unmarshal([]byte(s.ObservabilityConfig), &data) - if err != nil { - log.Printf("Could not appliying provided config: %v", err) - defaultConfigApplied = true - break - } - - cfg, err = observability.ParseFromMap(data) - if err != nil || cfg.LoggerCfg == nil { - log.Printf("Could not appliying provided config: %v", err) - defaultConfigApplied = true - } - - case s.KubernetesObservabilityConfigMapName != "": - kc, err := client.New(config.GetConfigOrDie(), client.Options{}) - if err != nil { - return err - } - - cm := &corev1.ConfigMap{} - var lastErr error - - if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { - lastErr = kc.Get(s.Context, client.ObjectKey{ - Namespace: s.KubernetesNamespace, - Name: s.KubernetesObservabilityConfigMapName, - }, cm) - - return lastErr == nil || apierrors.IsNotFound(lastErr), nil - }); err != nil { - log.Printf("Could not retrieve observability ConfigMap %q: %v", - s.KubernetesObservabilityConfigMapName, err) - defaultConfigApplied = true - } - - cfg, err = observability.ParseFromMap(cm.Data) - if err != nil || cfg.LoggerCfg == nil { - log.Printf("Could not apply provided config from ConfigMap %q: %v", - s.KubernetesObservabilityConfigMapName, err) - defaultConfigApplied = true - } - - default: - log.Print("Applying default observability configuration") - defaultConfigApplied = true - } - - if defaultConfigApplied { - cfg = observability.DefaultConfig() - } - - // Call build to perform validation of zap configuration. - l, err = cfg.LoggerCfg.Build() - for { - if err == nil { - break - } - if defaultConfigApplied { - return fmt.Errorf("default config failed to be applied due to error: %w", err) - } - - defaultConfigApplied = true - cfg = observability.DefaultConfig() - l, err = cfg.LoggerCfg.Build() - } - - s.LogLevel = cfg.LoggerCfg.Level - - s.Logger = l.Sugar() - s.LogLevel = cfg.LoggerCfg.Level - - // Setup metrics and start exporter. 
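
For illustration only, not part of the removed sources: a minimal sketch of the duration handling used above, where ISO8601 periods such as "PT10S" or "PT1M" are parsed with github.com/rickb777/date/period and plain Go durations ("30s") are accepted as a fallback, as Validate does for the polling period.

package main

import (
	"fmt"
	"time"

	"github.com/rickb777/date/period"
)

// parsePeriod mirrors the precedence above: ISO8601 first, Go duration as fallback.
func parsePeriod(v string) (time.Duration, error) {
	p, err := period.Parse(v)
	if err == nil {
		return p.DurationApprox(), nil
	}
	return time.ParseDuration(v)
}

func main() {
	for _, v := range []string{"PT10S", "PT1M", "30s"} {
		d, err := parsePeriod(v)
		fmt.Println(v, d, err) // PT10S 10s <nil>, PT1M 1m0s <nil>, 30s 30s <nil>
	}
}
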
- knmetrics.MemStatsOrDie(s.Context) - s.Context = metrics.InitializeReportingContext(s.Context, s.BrokerName) - s.UpdateMetricsOptions(cfg) - - switch { - case s.KubernetesStatusConfigmapName != "": - kc, err := client.New(config.GetConfigOrDie(), client.Options{}) - if err != nil { - return err - } - - var lastErr error - - if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { - cm := &corev1.ConfigMap{} - lastErr = kc.Get(s.Context, client.ObjectKey{ - Namespace: s.KubernetesNamespace, - Name: s.KubernetesStatusConfigmapName, - }, cm) - - return lastErr == nil || apierrors.IsNotFound(lastErr), nil - }); err != nil { - log.Printf("Could not retrieve status ConfigMap %q: %v", - s.KubernetesStatusConfigmapName, err) - } - - default: - // No status management by default - - } - - return nil -} - -func (s *Globals) Flush() { - if s.Logger != nil { - _ = s.Logger.Sync() - } - knmetrics.FlushExporter() -} - -func (s *Globals) UpdateMetricsOptions(cfg *observability.Config) { - s.Logger.Debugw("Updating metrics configuration.") - if cfg == nil || cfg.MetricsConfig == nil { - return - } - - m, err := cfg.ToMap() - if err != nil { - s.Logger.Errorw("Failed to parse config into map", zap.Error(err)) - return - } - - if err = knmetrics.UpdateExporter( - s.Context, - knmetrics.ExporterOptions{ - Domain: s.ObservabilityMetricsDomain, - Component: metricsComponent, - ConfigMap: m, - PrometheusPort: cfg.PrometheusPort, - }, - s.Logger); err != nil { - s.Logger.Errorw("failed to update metrics exporter", zap.Error(err)) - } -} - -func (s *Globals) UpdateLogLevel(cfg *observability.Config) { - s.Logger.Debugw("Updating logging configuration.") - if cfg == nil || cfg.LoggerCfg == nil { - return - } - - level := cfg.LoggerCfg.Level.Level() - s.Logger.Debugw("Updating logging level", zap.Any("level", level)) - if s.LogLevel.Level() != level { - s.Logger.Infof("Updating logging level from %v to %v.", s.LogLevel.Level(), level) - s.LogLevel.SetLevel(level) - } -} diff --git a/pkg/brokers/common/fs/fake/fake_filewatcher.go b/pkg/brokers/common/fs/fake/fake_filewatcher.go deleted file mode 100644 index 6c496293..00000000 --- a/pkg/brokers/common/fs/fake/fake_filewatcher.go +++ /dev/null @@ -1,55 +0,0 @@ -package fs - -import ( - "context" - "fmt" - "sync" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" -) - -type FakeFileWatcher interface { - fs.FileWatcher - DoCallback(path string) error -} - -type fakeFileWatcher struct { - watchedFiles map[string][]fs.WatchCallback - - m sync.RWMutex -} - -func NewFileWatcher() FakeFileWatcher { - return &fakeFileWatcher{ - watchedFiles: make(map[string][]fs.WatchCallback), - } -} - -func (cw *fakeFileWatcher) Start(_ context.Context) {} - -func (cw *fakeFileWatcher) Add(path string, cb fs.WatchCallback) error { - cw.m.Lock() - defer cw.m.Unlock() - - if _, ok := cw.watchedFiles[path]; !ok { - cw.watchedFiles[path] = []fs.WatchCallback{} - } - cw.watchedFiles[path] = append(cw.watchedFiles[path], cb) - - return nil -} - -func (cw *fakeFileWatcher) DoCallback(path string) error { - cw.m.RLock() - defer cw.m.RUnlock() - - cbs, ok := cw.watchedFiles[path] - if !ok { - return fmt.Errorf("path %q is not being watched", path) - } - - for _, cb := range cbs { - cb() - } - return nil -} diff --git a/pkg/brokers/common/fs/fake/fake_filewatcher_cached.go b/pkg/brokers/common/fs/fake/fake_filewatcher_cached.go deleted file mode 100644 index e928633e..00000000 --- a/pkg/brokers/common/fs/fake/fake_filewatcher_cached.go +++ /dev/null @@ -1,71 +0,0 @@ 
-package fs - -import ( - "context" - "fmt" - "sync" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" -) - -type FakeCachedFileWatcher interface { - fs.CachedFileWatcher - SetContent(path string, content []byte) error -} - -type watchedItem struct { - content []byte -} - -type fakeCachedFileWatcher struct { - watchedFiles map[string]*watchedItem - - m sync.RWMutex -} - -func NewCachedFileWatcher() FakeCachedFileWatcher { - return &fakeCachedFileWatcher{ - watchedFiles: make(map[string]*watchedItem), - } -} - -func (ccw *fakeCachedFileWatcher) Start(_ context.Context) {} - -func (ccw *fakeCachedFileWatcher) Add(path string, cb fs.CachedWatchCallback) error { - ccw.m.Lock() - defer ccw.m.Unlock() - - if _, ok := ccw.watchedFiles[path]; !ok { - ccw.watchedFiles[path] = nil - } - - return nil -} - -func (ccw *fakeCachedFileWatcher) GetContent(path string) ([]byte, error) { - ccw.m.RLock() - defer ccw.m.RUnlock() - - watched, ok := ccw.watchedFiles[path] - if !ok { - return nil, fmt.Errorf("file %q is not being watched", path) - } - - if watched != nil { - return watched.content, nil - } - - return nil, nil -} - -func (ccw *fakeCachedFileWatcher) SetContent(path string, content []byte) error { - ccw.m.Lock() - defer ccw.m.Unlock() - - if _, ok := ccw.watchedFiles[path]; !ok { - return fmt.Errorf("file %q is not being watched", path) - } - - ccw.watchedFiles[path].content = content - return nil -} diff --git a/pkg/brokers/common/fs/filewatcher.go b/pkg/brokers/common/fs/filewatcher.go deleted file mode 100644 index 5fb9cf1b..00000000 --- a/pkg/brokers/common/fs/filewatcher.go +++ /dev/null @@ -1,139 +0,0 @@ -package fs - -import ( - "context" - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/fsnotify/fsnotify" - "go.uber.org/zap" -) - -// WatchCallback is called when a watched file -// is updated. -type WatchCallback func() - -// FileWatcher object tracks changes to files. -type FileWatcher interface { - Add(path string, cb WatchCallback) error - Start(ctx context.Context) -} - -type fileWatcher struct { - watcher *fsnotify.Watcher - watchedFiles map[string][]WatchCallback - - m sync.RWMutex - start sync.Once - logger *zap.SugaredLogger -} - -// NewWatcher creates a new FileWatcher object that register files -// and calls back when they change. -func NewWatcher(logger *zap.SugaredLogger) (FileWatcher, error) { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return nil, err - } - - return &fileWatcher{ - watcher: watcher, - watchedFiles: make(map[string][]WatchCallback), - logger: logger, - }, nil -} - -// Add path/callback tuple to the FileWatcher. -func (cw *fileWatcher) Add(path string, cb WatchCallback) error { - absPath, err := filepath.Abs(path) - if err != nil { - return fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - cw.m.Lock() - defer cw.m.Unlock() - - cw.logger.Infow("Adding file to watch", zap.String("file", path)) - if _, ok := cw.watchedFiles[path]; !ok { - if err := cw.watcher.Add(path); err != nil { - return err - } - cw.watchedFiles[path] = []WatchCallback{cb} - return nil - } - - cw.watchedFiles[path] = append(cw.watchedFiles[path], cb) - return nil -} - -// Start the FileWatcher process. -func (cw *fileWatcher) Start(ctx context.Context) { - cw.start.Do(func() { - // Do not block, exit on context done. 
- go func() { - defer cw.watcher.Close() - for { - select { - case e, ok := <-cw.watcher.Events: - if !ok { - // watcher event channel finished - return - } - - if e.Op&fsnotify.Remove == fsnotify.Remove { - if fileExist(e.Name) { - if err := cw.watcher.Add(e.Name); err != nil { - cw.logger.Errorw( - fmt.Sprintf("could not add the path %q back to the watcher", e.Name), - zap.Error(err)) - } - } - continue - } - - cw.m.RLock() - cbs, ok := cw.watchedFiles[e.Name] - if !ok { - cw.logger.Warnw("Received a notification for a non watched file", zap.String("file", e.Name)) - } - - for _, cb := range cbs { - cb() - } - cw.m.RUnlock() - - case err, ok := <-cw.watcher.Errors: - if !ok { - // watcher error channel finished - return - } - cw.logger.Errorw("Error watching files", zap.Error(err)) - - case <-ctx.Done(): - cw.logger.Debug("Exiting file watcher process") - return - } - } - }() - }) -} - -func fileExist(file string) bool { - _, err := os.Stat(file) - if err != nil { - if os.IsExist(err) { - return true - } - if os.IsNotExist(err) { - return false - } - return false - } - return true -} diff --git a/pkg/brokers/common/fs/filewatcher_cached.go b/pkg/brokers/common/fs/filewatcher_cached.go deleted file mode 100644 index 9c960124..00000000 --- a/pkg/brokers/common/fs/filewatcher_cached.go +++ /dev/null @@ -1,109 +0,0 @@ -package fs - -import ( - "context" - "fmt" - "os" - "sync" - - "go.uber.org/zap" -) - -// CachedFileWatcher is a FileWatcher that caches and tracks the contents -// of watched files. -type CachedFileWatcher interface { - // Start the FileWatcher process. - Start(ctx context.Context) - // Add a file path to be watched. - Add(path string, cb CachedWatchCallback) error - // GetContent of watched file. - GetContent(path string) ([]byte, error) -} - -type cachedFileWatcher struct { - cw FileWatcher - watchedFiles map[string][]byte - - m sync.RWMutex - logger *zap.SugaredLogger -} - -// NewCachedFileWatcher creates a new FileWatcher object that register files -// and calls back when they change. -func NewCachedFileWatcher(logger *zap.SugaredLogger) (CachedFileWatcher, error) { - cw, err := NewWatcher(logger) - if err != nil { - return nil, err - } - - return &cachedFileWatcher{ - watchedFiles: make(map[string][]byte), - cw: cw, - logger: logger, - }, nil -} - -// Start the FileWatcher process. -func (ccw *cachedFileWatcher) Start(ctx context.Context) { - ccw.cw.Start(ctx) -} - -// updateContentFromFile does not locks the watchedFiles map, it is up -// to the caller to do so. -func (ccw *cachedFileWatcher) updateContentFromFile(path string) error { - content, err := os.ReadFile(path) - if err != nil { - return err - } - - ccw.watchedFiles[path] = content - return nil -} - -func (ccw *cachedFileWatcher) callback(path string, cb CachedWatchCallback) WatchCallback { - return func() { - ccw.m.Lock() - defer ccw.m.Unlock() - if err := ccw.updateContentFromFile(path); err != nil { - ccw.logger.Errorw("Could not read watched file", zap.Error(err)) - } - - // Call user's callback - cb(ccw.watchedFiles[path]) - } -} - -type CachedWatchCallback func(content []byte) - -// Add a file path to be watched. 
-func (ccw *cachedFileWatcher) Add(path string, cb CachedWatchCallback) error { - if err := ccw.cw.Add(path, ccw.callback(path, cb)); err != nil { - return err - } - - ccw.m.Lock() - defer ccw.m.Unlock() - if _, ok := ccw.watchedFiles[path]; !ok { - if err := ccw.updateContentFromFile(path); err != nil { - ccw.logger.Errorw("Could not get content from file", zap.Error(err)) - // initialize to be able to distinguish paths not being watched - // and those being watched but not available. - ccw.watchedFiles[path] = nil - } - } - - return nil -} - -// GetContent of watched file. -func (ccw *cachedFileWatcher) GetContent(path string) ([]byte, error) { - ccw.m.RLock() - defer ccw.m.RUnlock() - - content, ok := ccw.watchedFiles[path] - if !ok { - return nil, fmt.Errorf("file %q is not being watched", path) - } - - return content, nil -} diff --git a/pkg/brokers/common/fs/poller.go b/pkg/brokers/common/fs/poller.go deleted file mode 100644 index e07f4d08..00000000 --- a/pkg/brokers/common/fs/poller.go +++ /dev/null @@ -1,127 +0,0 @@ -package fs - -import ( - "bytes" - "context" - "fmt" - "os" - "path/filepath" - "sync" - "time" - - "go.uber.org/zap" -) - -type PollerCallback func(content []byte) - -type Poller interface { - Add(path string, cb PollerCallback) error - Start(ctx context.Context) - GetContent(path string) ([]byte, error) -} - -type pollFile struct { - cbs []PollerCallback - cachedContents []byte -} - -type poller struct { - polledFiles map[string]*pollFile - - period time.Duration - m sync.RWMutex - start sync.Once - logger *zap.SugaredLogger -} - -func NewPoller(period time.Duration, logger *zap.SugaredLogger) (Poller, error) { - return &poller{ - period: period, - logger: logger, - polledFiles: map[string]*pollFile{}, - }, nil -} - -func (p *poller) GetContent(path string) ([]byte, error) { - p.m.RLock() - defer p.m.RUnlock() - - pf, ok := p.polledFiles[path] - if !ok { - return nil, fmt.Errorf("file %q is not being polled", path) - } - - return pf.cachedContents, nil -} - -func (p *poller) Add(path string, cb PollerCallback) error { - absPath, err := filepath.Abs(path) - if err != nil { - return fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - p.m.Lock() - defer p.m.Unlock() - - p.logger.Infow("Adding file to poller", zap.String("file", path)) - if _, ok := p.polledFiles[path]; !ok { - - p.polledFiles[path] = &pollFile{cbs: []PollerCallback{cb}} - return nil - } - - pf := p.polledFiles[path] - pf.cbs = append(pf.cbs, cb) - p.polledFiles[path] = pf - - // force first polling - p.poll() - - return nil -} - -func (p *poller) Start(ctx context.Context) { - p.start.Do(func() { - ticker := time.NewTicker(p.period) - // Do not block, exit on context done. 
- go func() { - for { - - p.poll() - - select { - case <-ctx.Done(): - p.logger.Debug("Exiting file poller process") - return - case <-ticker.C: - // file polling at the start of the loop - } - } - }() - }) -} - -func (p *poller) poll() { - p.m.RLock() - defer p.m.RUnlock() - - for file, pf := range p.polledFiles { - b, err := os.ReadFile(file) - if err != nil { - p.logger.Errorw("cannot poll file", zap.String("filed", file), zap.Error(err)) - } - - if !bytes.Equal(pf.cachedContents, b) { - p.logger.Infow("Existing", zap.String("contents", string(pf.cachedContents))) - p.logger.Infow("New", zap.String("contents", string(b))) - pf.cachedContents = b - for _, cb := range pf.cbs { - cb(b) - } - } - } -} diff --git a/pkg/brokers/common/kubernetes/controller/configmap.go b/pkg/brokers/common/kubernetes/controller/configmap.go deleted file mode 100644 index e14283d6..00000000 --- a/pkg/brokers/common/kubernetes/controller/configmap.go +++ /dev/null @@ -1,56 +0,0 @@ -package controller - -import ( - "context" - "fmt" - - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/zeiss/typhoon/pkg/brokers/config/observability" -) - -type ConfigMapObservabilityCallback func(cfg *observability.Config) - -// reconcileObservabilityConfigMap reconciles the observability ConfigMap. -type reconcileObservabilityConfigMap struct { - name string - cbs []ConfigMapObservabilityCallback - - client client.Client - logger *zap.SugaredLogger -} - -// Implement reconcile.Reconciler so the controller can reconcile objects -var _ reconcile.Reconciler = &reconcileObservabilityConfigMap{} - -func (r *reconcileObservabilityConfigMap) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - cm := &corev1.ConfigMap{} - err := r.client.Get(ctx, request.NamespacedName, cm) - if errors.IsNotFound(err) { - r.logger.Errorw("could not find ConfigMap", zap.String("name", request.NamespacedName.String())) - return reconcile.Result{}, nil - } - - if err != nil { - return reconcile.Result{}, fmt.Errorf("could not fetch ConfigMap: %w", err) - } - - r.logger.Infow("Reconciling ConfigMap", zap.String("name", cm.Name)) - cfg, err := observability.ParseFromMap(cm.Data) - if err != nil { - return reconcile.Result{}, fmt.Errorf("error parsing observability config from ConfigMap %q: %w", cm.Name, err) - } - - for _, cb := range r.cbs { - cb(cfg) - } - - return reconcile.Result{}, nil -} diff --git a/pkg/brokers/common/kubernetes/controller/manager.go b/pkg/brokers/common/kubernetes/controller/manager.go deleted file mode 100644 index 1b62fa01..00000000 --- a/pkg/brokers/common/kubernetes/controller/manager.go +++ /dev/null @@ -1,117 +0,0 @@ -package controller - -import ( - "context" - "fmt" - - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - crctrl "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - crlog "sigs.k8s.io/controller-runtime/pkg/log" - crzap "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -type 
SecretBrokerConfigCallback func(*cfgbroker.Config) - -type Manager struct { - manager manager.Manager - rs *reconcileBrokerConfigSecret - rcm *reconcileObservabilityConfigMap - - logger *zap.SugaredLogger -} - -func NewManager(namespace string, logger *zap.SugaredLogger) (*Manager, error) { - // There is no easy way of bridging the gap between Knative style - // and controller-runtime style loggers. - crlog.SetLogger(crzap.New()) - - mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{}) - if err != nil { - return nil, fmt.Errorf("unable to set up controller manager: %w", err) - } - - return &Manager{ - manager: mgr, - logger: logger, - }, nil -} - -func (m *Manager) AddSecretControllerForBrokerConfig(name, key string) error { - m.logger.Infow("Setting up Secret controller for broker config", zap.String("name", name), zap.String("key", key)) - m.rs = &reconcileBrokerConfigSecret{ - name: name, - key: key, - client: m.manager.GetClient(), - logger: m.logger, - } - - c, err := crctrl.New("broker-config-secret-controller", m.manager, crctrl.Options{ - Reconciler: m.rs, - }) - if err != nil { - return fmt.Errorf("unable to set up Secret controller: %w", err) - } - if err := c.Watch( - source.Kind(m.manager.GetCache(), &corev1.Secret{}), - &handler.EnqueueRequestForObject{}, - predicate.NewPredicateFuncs(func(o client.Object) bool { return o.GetName() == name }), - ); err != nil { - return fmt.Errorf("unable to set watch for Secret: %w", err) - } - - return nil -} - -func (m *Manager) AddSecretCallbackForBrokerConfig(cb SecretBrokerConfigCallback) { - m.rs.cbs = append(m.rs.cbs, cb) -} - -func (m *Manager) AddConfigMapControllerForObservability(name string) error { - m.logger.Info("Setting up ConfigMap controller for observability") - m.rcm = &reconcileObservabilityConfigMap{ - name: name, - client: m.manager.GetClient(), - logger: m.logger, - } - - c, err := crctrl.New("observability-configmap-controller", m.manager, crctrl.Options{ - Reconciler: m.rcm, - }) - if err != nil { - return fmt.Errorf("unable to set up ConfigMap controller: %w", err) - } - if err := c.Watch( - source.Kind(m.manager.GetCache(), &corev1.ConfigMap{}), - &handler.EnqueueRequestForObject{}, - predicate.NewPredicateFuncs(func(o client.Object) bool { return o.GetName() == name }), - ); err != nil { - return fmt.Errorf("unable to set watch for ConfigMaps: %w", err) - } - - return nil -} - -func (m *Manager) AddConfigMapCallbackForObservabilityConfig(cb ConfigMapObservabilityCallback) { - m.rcm.cbs = append(m.rcm.cbs, cb) -} - -func (m *Manager) Start(ctx context.Context) error { - m.logger.Info("Starting manager") - if err := m.manager.Start(ctx); err != nil { - return fmt.Errorf("unable to run controller manager: %w", err) - } - - return nil -} diff --git a/pkg/brokers/common/kubernetes/controller/secret.go b/pkg/brokers/common/kubernetes/controller/secret.go deleted file mode 100644 index 5186e400..00000000 --- a/pkg/brokers/common/kubernetes/controller/secret.go +++ /dev/null @@ -1,67 +0,0 @@ -package controller - -import ( - "context" - "fmt" - - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -// reconcileSecret reconciles the Secret. 
-type reconcileBrokerConfigSecret struct { - name string - key string - cbs []SecretBrokerConfigCallback - - client client.Client - logger *zap.SugaredLogger -} - -// Implement reconcile.Reconciler so the controller can reconcile objects -var _ reconcile.Reconciler = &reconcileBrokerConfigSecret{} - -func (r *reconcileBrokerConfigSecret) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - s := &corev1.Secret{} - err := r.client.Get(ctx, request.NamespacedName, s) - if errors.IsNotFound(err) { - r.logger.Errorw("could not find Secret", zap.String("name", request.NamespacedName.String())) - return reconcile.Result{}, nil - } - - if err != nil { - return reconcile.Result{}, fmt.Errorf("could not fetch Secret: %w", err) - } - - r.logger.Infow("Reconciling Secret", zap.String("name", s.Name)) - content, ok := s.Data[r.key] - if !ok { - r.logger.Errorw("empty Secret", zap.String("name", s.Name)) - return reconcile.Result{}, nil - } - - if len(content) == 0 { - // Discard file events that do not inform content. - r.logger.Debugw("Received secret with empty contents", zap.String("name", s.Name)) - return reconcile.Result{}, nil - } - - cfg, err := cfgbroker.Parse(string(content)) - if err != nil { - return reconcile.Result{}, fmt.Errorf("error parsing config from secret %q: %w", s.Name, err) - } - - for _, cb := range r.cbs { - cb(cfg) - } - - return reconcile.Result{}, nil -} diff --git a/pkg/brokers/common/kubernetes/status/status.go b/pkg/brokers/common/kubernetes/status/status.go deleted file mode 100644 index 2257fb78..00000000 --- a/pkg/brokers/common/kubernetes/status/status.go +++ /dev/null @@ -1,105 +0,0 @@ -package status - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/zeiss/typhoon/pkg/brokers/status" -) - -type kubernetesBackend struct { - // Instance must be unique for every instance of the broker, it will - // be used as the root element for the status reporting structure. - instance string - - // Expiry for statuses informed from other instances. - instanceExpire time.Duration - - // ConfigMap object and key identification - key client.ObjectKey - cmkey string - - client client.Client - logger *zap.SugaredLogger -} - -// Returns a kubernetes status manager object. Parameters are: -// - name, namespace and key for the ConfigMap where the status will be written to. -// - identifier for this broker instance. -// - instance expiration for all other instances informed at the configmap. -// - kubernetes client -// - logger -func NewKubernetesBackend(name, namespace, cmkey, instance string, instanceExpire time.Duration, kc client.Client, log *zap.SugaredLogger) status.Backend { - km := &kubernetesBackend{ - instance: instance, - instanceExpire: instanceExpire, - - key: client.ObjectKey{ - Namespace: namespace, - Name: name, - }, - - cmkey: cmkey, - client: kc, - - logger: log, - } - - return km -} - -func (b *kubernetesBackend) UpdateStatus(ctx context.Context, s *status.Status) error { - // Read current contents of the status at the ConfigMap. - cm := &corev1.ConfigMap{} - err := b.client.Get(ctx, b.key, cm) - if err != nil { - return fmt.Errorf("could not read status configmap: %w", err) - } - - if cm.Data == nil { - cm.Data = make(map[string]string) - } - - // Parse ConfigMap key contents into a Status structure. - // If it does not exists or is not formatted an empty one will be used. 
- st := map[string]status.Status{} - data, ok := cm.Data[b.cmkey] - if ok { - if err := json.Unmarshal([]byte(data), &st); err != nil { - b.logger.Errorw("status ConfigMap contents could not be parsed. Status will be overwritten", zap.Error(err)) - } - } - - // Iterate all entries, remove those instances that are stale - for k := range st { - // Skip own instance, we will take care of it after this iteration. - if k == b.instance { - continue - } - - if time.Since(st[k].LastUpdated) > b.instanceExpire { - b.logger.Infof("Deleting expired instance status for %s", k) - delete(st, k) - } - } - - st[b.instance] = *s - bst, err := json.Marshal(st) - if err != nil { - return fmt.Errorf("failed to marshal status: %w", err) - } - - cm.Data[b.cmkey] = string(bst) - if err = b.client.Update(ctx, cm, &client.UpdateOptions{}); err != nil { - return err - } - - return nil -} diff --git a/pkg/brokers/common/metrics/metrics.go b/pkg/brokers/common/metrics/metrics.go deleted file mode 100644 index a61cbaeb..00000000 --- a/pkg/brokers/common/metrics/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -package metrics - -import ( - "context" - "sync" - - "go.opencensus.io/resource" - "go.opencensus.io/tag" - "knative.dev/pkg/metrics/metricskey" -) - -const ( - resourceTypeTyphoonBroker = "typhoon_broker" - - labelBrokerName = "broker_name" - // labelUniqueName = "unique_name" - labelReceivedEventType = "received_type" -) - -var ( - once sync.Once - - ReceivedEventTypeKey = tag.MustNewKey(labelReceivedEventType) -) - -// func InitializeReportingContext(ctx context.Context, brokerName, instanceID string) context.Context { -func InitializeReportingContext(ctx context.Context, brokerName string) context.Context { - once.Do(func() { - ctx = metricskey.WithResource(ctx, resource.Resource{ - Type: resourceTypeTyphoonBroker, - Labels: map[string]string{ - labelBrokerName: brokerName, - // labelUniqueName: instanceID, - }, - }) - }) - return ctx -} diff --git a/pkg/brokers/config/broker/config.go b/pkg/brokers/config/broker/config.go deleted file mode 100644 index 8e5c75b1..00000000 --- a/pkg/brokers/config/broker/config.go +++ /dev/null @@ -1,228 +0,0 @@ -package broker - -import ( - "context" - "net/url" - - "sigs.k8s.io/yaml" - - "knative.dev/pkg/apis" -) - -type Ingest struct { - User string `json:"user"` - Password string `json:"password"` -} - -func (i *Ingest) Validate(ctx context.Context) *apis.FieldError { - if i == nil { - return nil - } - - if i.Password != "" && i.User == "" { - return &apis.FieldError{ - Message: "user must be provided when password is informed", - Paths: []string{"user"}, - } - } - - return nil -} - -type BackoffPolicyType string - -const ( - BackoffPolicyConstant BackoffPolicyType = "constant" - BackoffPolicyLinear BackoffPolicyType = "linear" - BackoffPolicyExponential BackoffPolicyType = "exponential" -) - -type DeliveryOptions struct { - Retry *int32 `json:"retry,omitempty"` - BackoffPolicy *BackoffPolicyType `json:"backoffPolicy,omitempty"` - - // BackoffDelay is the delay before retrying. 
- // More information on Duration format: - // - https://www.iso.org/iso-8601-date-and-time-format.html - // - https://en.wikipedia.org/wiki/ISO_8601 - BackoffDelay *string `json:"backoffDelay,omitempty"` - DeadLetterURL *string `json:"deadLetterURL,omitempty"` -} - -func (d *DeliveryOptions) Validate(ctx context.Context) (errs *apis.FieldError) { - if d == nil { - return - } - - if d.DeadLetterURL != nil && *d.DeadLetterURL != "" { - if _, err := url.Parse(*d.DeadLetterURL); err != nil { - errs = errs.Also(&apis.FieldError{ - Message: "DLS URL cannot be parsed", - Paths: []string{"deadLetterURL"}, - Details: err.Error(), - }) - } - } - - return -} - -type Target struct { - URL *string `json:"url,,omitempty"` - // Deprecated, use the trigger's Delivery options instead. - DeliveryOptions *DeliveryOptions `json:"deliveryOptions,omitempty"` -} - -func (i *Target) Validate(ctx context.Context) (errs *apis.FieldError) { - if i == nil { - return - } - - if i.URL != nil && *i.URL != "" { - if _, err := url.Parse(*i.URL); err != nil { - errs = errs.Also(&apis.FieldError{ - Message: "Target URL cannot be parsed", - Paths: []string{"url"}, - Details: err.Error(), - }) - } - } - - return errs.Also(i.DeliveryOptions.Validate(ctx)) -} - -type Filter struct { - // All evaluates to true if all the nested expressions evaluate to true. - // It must contain at least one filter expression. - // - // +optional - All []Filter `json:"all,omitempty"` - - // Any evaluates to true if at least one of the nested expressions evaluates - // to true. It must contain at least one filter expression. - // - // +optional - Any []Filter `json:"any,omitempty"` - - // Not evaluates to true if the nested expression evaluates to false. - // - // +optional - Not *Filter `json:"not,omitempty"` - - // Exact evaluates to true if the value of the matching CloudEvents - // attribute matches exactly the String value specified (case-sensitive). - // Exact must contain exactly one property, where the key is the name of the - // CloudEvents attribute to be matched, and its value is the String value to - // use in the comparison. The attribute name and value specified in the filter - // expression cannot be empty strings. - // - // +optional - Exact map[string]string `json:"exact,omitempty"` - - // Prefix evaluates to true if the value of the matching CloudEvents - // attribute starts with the String value specified (case-sensitive). Prefix - // must contain exactly one property, where the key is the name of the - // CloudEvents attribute to be matched, and its value is the String value to - // use in the comparison. The attribute name and value specified in the filter - // expression cannot be empty strings. - // - // +optional - Prefix map[string]string `json:"prefix,omitempty"` - - // Suffix evaluates to true if the value of the matching CloudEvents - // attribute ends with the String value specified (case-sensitive). Suffix - // must contain exactly one property, where the key is the name of the - // CloudEvents attribute to be matched, and its value is the String value to - // use in the comparison. The attribute name and value specified in the filter - // expression cannot be empty strings. - // - // +optional - Suffix map[string]string `json:"suffix,omitempty"` -} - -// Bounds applied to the trigger that mark the initial and final item to -// be sent from the broker. 
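// NOTE (illustrative sketch, not part of the removed files): an example of the YAML document
// that the Parse function below accepts, put together from the Ingest, Trigger, Target,
// Filter and DeliveryOptions types in this file. The trigger name, event type and URLs are
// hypothetical; backoffDelay is an ISO 8601 duration, as described in the field comment above.
const exampleBrokerConfigYAML = `
ingest:
  user: broker-user
  password: not-a-real-password
triggers:
  orders-to-processor:
    filters:
      - exact:
          type: com.example.order.created
    target:
      url: http://order-processor.default.svc.cluster.local
    deliveryOptions:
      retry: 3
      backoffPolicy: exponential
      backoffDelay: PT2S
      deadLetterURL: http://dead-letter.default.svc.cluster.local
`

// parseExampleBrokerConfig shows the round trip through Parse, which also runs Validate on
// the resulting Config (assuming this snippet lives in the same package as Parse).
func parseExampleBrokerConfig() (*Config, error) {
	return Parse(exampleBrokerConfigYAML)
}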
-type Bounds struct { - Start *string `json:"start"` - End *string `json:"end"` -} - -func (b *Bounds) GetStart() string { - if b == nil || b.Start == nil { - return "" - } - - return *b.Start -} - -func (b *Bounds) GetEnd() string { - if b == nil || b.End == nil { - return "" - } - - return *b.End -} - -type TriggerBounds struct { - ByID *Bounds `json:"byId,omitempty"` - ByDate *Bounds `json:"byDate,omitempty"` -} - -type Trigger struct { - Filters []Filter `json:"filters,omitempty"` - Target Target `json:"target"` - DeliveryOptions *DeliveryOptions `json:"deliveryOptions,omitempty"` - Bounds *TriggerBounds `json:"bounds,omitempty"` -} - -// HACK temporary to make the Delivery options move smooth, -// remove this once Target does not host the deliver options. -func (t *Trigger) GetDeliveryOptions() *DeliveryOptions { - if t.DeliveryOptions != nil { - return t.DeliveryOptions - } - return t.Target.DeliveryOptions -} - -func (t *Trigger) Validate(ctx context.Context) *apis.FieldError { - var errs *apis.FieldError - - if t == nil { - return nil - } - return errs.Also(t.Target.Validate(ctx)).ViaField("target"). - Also(t.DeliveryOptions.Validate(ctx).ViaField("deliveryOptions")). - Also(ValidateSubscriptionAPIFiltersList(ctx, t.Filters).ViaField("filters")) -} - -type Config struct { - Ingest *Ingest `json:"ingest,omitempty"` - Triggers map[string]Trigger `json:"triggers"` -} - -func (c *Config) Validate(ctx context.Context) *apis.FieldError { - if c == nil { - return nil - } - - errs := c.Ingest.Validate(ctx).ViaField("ingest") - - for k, t := range c.Triggers { - errs = errs.Also(t.Validate(ctx).ViaFieldKey("triggers", k)) - } - - return errs -} - -func Parse(config string) (*Config, error) { - c := &Config{} - if err := yaml.Unmarshal([]byte(config), c); err != nil { - return nil, err - } - - if err := c.Validate(context.Background()); err != nil { - return nil, err - } - - return c, nil -} diff --git a/pkg/brokers/config/broker/poller/poller.go b/pkg/brokers/config/broker/poller/poller.go deleted file mode 100644 index d9360a43..00000000 --- a/pkg/brokers/config/broker/poller/poller.go +++ /dev/null @@ -1,81 +0,0 @@ -package poller - -import ( - "context" - "fmt" - "path/filepath" - - "go.uber.org/zap" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -type PollerCallback func(*cfgbroker.Config) - -type Poller struct { - fsp fs.Poller - path string - logger *zap.SugaredLogger - - config *cfgbroker.Config - cbs []PollerCallback -} - -func NewPoller(fsp fs.Poller, path string, logger *zap.SugaredLogger) (*Poller, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return nil, fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - return &Poller{ - fsp: fsp, - path: path, - logger: logger, - }, nil -} - -func (cw *Poller) AddCallback(cb PollerCallback) { - cw.cbs = append(cw.cbs, cb) -} - -func (cw *Poller) GetConfig() *cfgbroker.Config { - return cw.config -} - -func (cw *Poller) Start(ctx context.Context) error { - err := cw.fsp.Add(cw.path, cw.update) - if err != nil { - return err - } - - if cfg, err := cw.fsp.GetContent(cw.path); cfg != nil && err == nil { - cw.update(cfg) - } - - cw.fsp.Start(ctx) - return nil -} - -func (cw *Poller) update(content []byte) { - if len(content) == 0 { - // Discard file events that do not inform content. 
- cw.logger.Debug(fmt.Sprintf("Received event with empty contents for %s", cw.path)) - return - } - - cfg, err := cfgbroker.Parse(string(content)) - if err != nil { - cw.logger.Errorw(fmt.Sprintf("Error parsing config from %s", cw.path), zap.Error(err)) - return - } - - cw.config = cfg - for _, cb := range cw.cbs { - cb(cfg) - } -} diff --git a/pkg/brokers/config/broker/types_deepcopy.go b/pkg/brokers/config/broker/types_deepcopy.go deleted file mode 100644 index 0fe5caa1..00000000 --- a/pkg/brokers/config/broker/types_deepcopy.go +++ /dev/null @@ -1,60 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -package broker - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Filter) DeepCopyInto(out *Filter) { - *out = *in - if in.All != nil { - in, out := &in.All, &out.All - *out = make([]Filter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Any != nil { - in, out := &in.Any, &out.Any - *out = make([]Filter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Not != nil { - in, out := &in.Not, &out.Not - *out = new(Filter) - (*in).DeepCopyInto(*out) - } - if in.Exact != nil { - in, out := &in.Exact, &out.Exact - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Prefix != nil { - in, out := &in.Prefix, &out.Prefix - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Suffix != nil { - in, out := &in.Suffix, &out.Suffix - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. -func (in *Filter) DeepCopy() *Filter { - if in == nil { - return nil - } - out := new(Filter) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/brokers/config/broker/types_validation.go b/pkg/brokers/config/broker/types_validation.go deleted file mode 100644 index f14cd252..00000000 --- a/pkg/brokers/config/broker/types_validation.go +++ /dev/null @@ -1,102 +0,0 @@ -package broker - -import ( - "context" - "regexp" - - "knative.dev/eventing/pkg/apis/feature" - "knative.dev/pkg/apis" -) - -// Only allow lowercase alphanumeric, starting with letters. 
-var validAttributeName = regexp.MustCompile(`^[a-z][a-z0-9]*$`) - -func ValidateAttributesNames(attrs map[string]string) (errs *apis.FieldError) { - for attr := range attrs { - if !validAttributeName.MatchString(attr) { - errs = errs.Also(apis.ErrInvalidKeyName(attr, apis.CurrentField, "Attribute name must start with a letter and can only contain lowercase alphanumeric").ViaKey(attr)) - } - } - return errs -} - -func ValidateSubscriptionAPIFiltersList(ctx context.Context, filters []Filter) (errs *apis.FieldError) { - if filters == nil || !feature.FromContext(ctx).IsEnabled(feature.NewTriggerFilters) { - return nil - } - - for i, f := range filters { - f := f - errs = errs.Also(ValidateSubscriptionAPIFilter(ctx, &f)).ViaIndex(i) - } - return errs -} - -func ValidateSubscriptionAPIFilter(ctx context.Context, filter *Filter) (errs *apis.FieldError) { - if filter == nil { - return nil - } - errs = errs.Also( - ValidateOneOf(filter), - ).Also( - ValidateAttributesNames(filter.Exact).ViaField("exact"), - ).Also( - ValidateAttributesNames(filter.Prefix).ViaField("prefix"), - ).Also( - ValidateAttributesNames(filter.Suffix).ViaField("suffix"), - ).Also( - ValidateSubscriptionAPIFiltersList(ctx, filter.All).ViaField("all"), - ).Also( - ValidateSubscriptionAPIFiltersList(ctx, filter.Any).ViaField("any"), - ).Also( - ValidateSubscriptionAPIFilter(ctx, filter.Not).ViaField("not"), - ) - return errs -} - -func ValidateOneOf(filter *Filter) (err *apis.FieldError) { - if filter != nil && hasMultipleDialects(filter) { - return apis.ErrGeneric("multiple dialects found, filters can have only one dialect set") - } - return nil -} - -func hasMultipleDialects(filter *Filter) bool { - dialectFound := false - if len(filter.Exact) > 0 { - dialectFound = true - } - if len(filter.Prefix) > 0 { - if dialectFound { - return true - } else { - dialectFound = true - } - } - if len(filter.Suffix) > 0 { - if dialectFound { - return true - } else { - dialectFound = true - } - } - if len(filter.All) > 0 { - if dialectFound { - return true - } else { - dialectFound = true - } - } - if len(filter.Any) > 0 { - if dialectFound { - return true - } else { - dialectFound = true - } - } - if filter.Not != nil && dialectFound { - return true - } - - return false -} diff --git a/pkg/brokers/config/broker/watcher/watcher.go b/pkg/brokers/config/broker/watcher/watcher.go deleted file mode 100644 index 3cb2cdea..00000000 --- a/pkg/brokers/config/broker/watcher/watcher.go +++ /dev/null @@ -1,84 +0,0 @@ -package watcher - -import ( - "context" - "fmt" - "path/filepath" - - "go.uber.org/zap" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" -) - -type WatcherCallback func(*cfgbroker.Config) - -type Watcher struct { - cfw fs.CachedFileWatcher - path string - logger *zap.SugaredLogger - - config *cfgbroker.Config - cbs []WatcherCallback -} - -func NewWatcher(cfw fs.CachedFileWatcher, path string, logger *zap.SugaredLogger) (*Watcher, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return nil, fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - return &Watcher{ - cfw: cfw, - path: path, - logger: logger, - }, nil -} - -func (cw *Watcher) AddCallback(cb WatcherCallback) { - cw.cbs = append(cw.cbs, cb) -} - -func (cw *Watcher) GetConfig() *cfgbroker.Config { - return cw.config -} - -func (cw *Watcher) Start(ctx context.Context) error 
{ - err := cw.cfw.Add(cw.path, cw.update) - if err != nil { - return err - } - - // Perform a first call to the callback with the contents of the config - // file. Otherwise the callback won't be called until a modification - // occurs. - if cfg, err := cw.cfw.GetContent(cw.path); cfg != nil && err == nil { - cw.update(cfg) - } - - cw.cfw.Start(ctx) - return nil -} - -func (cw *Watcher) update(content []byte) { - if len(content) == 0 { - // Discard file events that do not inform content. - cw.logger.Debug(fmt.Sprintf("Received event with empty contents for %s", cw.path)) - return - } - - cfg, err := cfgbroker.Parse(string(content)) - if err != nil { - cw.logger.Errorw(fmt.Sprintf("Error parsing config from %s", cw.path), zap.Error(err)) - return - } - - cw.config = cfg - for _, cb := range cw.cbs { - cb(cfg) - } -} diff --git a/pkg/brokers/config/observability/config.go b/pkg/brokers/config/observability/config.go deleted file mode 100644 index 0c6ed273..00000000 --- a/pkg/brokers/config/observability/config.go +++ /dev/null @@ -1,148 +0,0 @@ -package observability - -import ( - "encoding/json" - "fmt" - "os" - "strconv" - - "go.uber.org/zap" - "sigs.k8s.io/yaml" -) - -const ( - zapLoggerConfigLabel = "zap-logger-config" - backendDestinationLabel = "metrics.backend-destination" - reportingPeriodSecondsLabel = "metrics.reporting-period-seconds" - prometheusPortLabel = "metrics.prometheus-port" - openCensusAddressLabel = "metrics.opencensus-address" -) - -type Config struct { - *MetricsConfig `json:",inline"` - ZapLoggerConfig string `json:"zap-logger-config"` - - LoggerCfg *zap.Config `json:"-"` -} - -func (c *Config) ToMap() (map[string]string, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - - mi := map[string]interface{}{} - err = json.Unmarshal(b, &mi) - if err != nil { - return nil, err - } - - m := make(map[string]string, len(mi)) - for k, v := range mi { - if s, ok := v.(string); ok { - if s != "" { - m[k] = s - } - continue - } - if f, ok := v.(float64); ok { - if f != 0 { - m[k] = strconv.Itoa(int(f)) - } - continue - } - return nil, fmt.Errorf("config element %s type is unexpected: %T(%v)", k, v, v) - } - - return m, nil -} - -type MetricsConfig struct { - BackendDestination string `json:"metrics.backend-destination"` - ReportingPeriodSeconds int `json:"metrics.reporting-period-seconds"` - PrometheusPort int `json:"metrics.prometheus-port"` - OpenCensusAddress string `json:"metrics.opencensus-address"` -} - -func ReadFromFile(file string) (*Config, error) { - f, err := os.ReadFile(file) - if err != nil { - return nil, fmt.Errorf("could not read %s: %+w", file, err) - } - - return Parse(f) -} - -func defaultZapConfig() *zap.Config { - lc := zap.NewProductionConfig() - return &lc -} - -func defaultMetricsConfig() *MetricsConfig { - return &MetricsConfig{} -} - -func DefaultConfig() *Config { - return &Config{ - LoggerCfg: defaultZapConfig(), - MetricsConfig: defaultMetricsConfig(), - } -} - -func Parse(content []byte) (*Config, error) { - cfg := &Config{} - if err := yaml.Unmarshal(content, &cfg); err != nil { - return nil, fmt.Errorf("could not parse observability data into string map: %+w", err) - } - - loggingCfg := defaultZapConfig() - if cfg.ZapLoggerConfig != "" { - if err := json.Unmarshal([]byte(cfg.ZapLoggerConfig), loggingCfg); err != nil { - return nil, err - } - } - - cfg.LoggerCfg = loggingCfg - - if cfg.MetricsConfig == nil { - cfg.MetricsConfig = defaultMetricsConfig() - } - - return cfg, nil -} - -func ParseFromMap(content 
map[string]string) (*Config, error) { - cfg := DefaultConfig() - - if c, ok := content[zapLoggerConfigLabel]; ok { - if err := json.Unmarshal([]byte(c), cfg.LoggerCfg); err != nil { - return nil, fmt.Errorf("could not unmarshal zap logger config: %w", err) - } - } - - if c, ok := content[backendDestinationLabel]; ok { - cfg.BackendDestination = c - } - - if c, ok := content[reportingPeriodSecondsLabel]; ok { - p, err := strconv.Atoi(c) - if err != nil { - return nil, fmt.Errorf("reporting period seconds must be an integer number: %w", err) - } - cfg.ReportingPeriodSeconds = p - } - - if c, ok := content[prometheusPortLabel]; ok { - p, err := strconv.Atoi(c) - if err != nil { - return nil, fmt.Errorf("prometheus port must be an integer number: %w", err) - } - cfg.PrometheusPort = p - } - - if c, ok := content[openCensusAddressLabel]; ok { - cfg.OpenCensusAddress = c - } - - return cfg, nil -} diff --git a/pkg/brokers/config/observability/config_test.go b/pkg/brokers/config/observability/config_test.go deleted file mode 100644 index f920d038..00000000 --- a/pkg/brokers/config/observability/config_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package observability - -import ( - "testing" - - "github.com/stretchr/testify/require" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func TestParse(t *testing.T) { - info := zap.NewAtomicLevelAt(zapcore.InfoLevel) - cases := map[string]struct { - content string - expectedConfig Config - }{ - "simple logger config": { - content: ` -zap-logger-config: | - { - "level": "info" - } - -`, expectedConfig: Config{ - LoggerCfg: &zap.Config{ - Level: info, - Development: false, - }, - MetricsConfig: &MetricsConfig{}, - }, - }, - "full logger config": { - content: ` -zap-logger-config: | - { - "level": "info", - "development": true, - "outputPaths": ["stdout"], - "errorOutputPaths": ["stderr"], - "encoding": "json", - "encoderConfig": { - "timeKey": "timestamp", - "levelKey": "severity", - "nameKey": "logger", - "callerKey": "caller", - "messageKey": "message", - "stacktraceKey": "stacktrace", - "lineEnding": "", - "levelEncoder": "", - "timeEncoder": "iso8601", - "durationEncoder": "", - "callerEncoder": "" - } - } -`, expectedConfig: Config{ - LoggerCfg: &zap.Config{ - Level: info, - Development: true, - }, - MetricsConfig: &MetricsConfig{}, - }, - }, - "metrics config": { - content: ` -zap-logger-config: | - { - "level": "info" - } -metrics.backend-destination: prometheus -metrics.reporting-period-seconds: 5 -metrics.prometheus-port: 9092 - -`, expectedConfig: Config{ - LoggerCfg: &zap.Config{ - Level: info, - Development: false, - }, - MetricsConfig: &MetricsConfig{ - BackendDestination: "prometheus", - PrometheusPort: 9092, - ReportingPeriodSeconds: 5, - }, - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - c, err := Parse([]byte(tc.content)) - require.Equal(t, err, nil) - - t.Logf("Config: %+v", c) - t.Logf("Config: %+v", c.MetricsConfig) - t.Logf("Config: %+v", c.ZapLoggerConfig) - - // Compare logger configuration elements. - require.Equal(t, tc.expectedConfig.LoggerCfg.Level, c.LoggerCfg.Level) - require.Equal(t, tc.expectedConfig.LoggerCfg.Development, c.LoggerCfg.Development) - - // Compare metric configuration. 
- require.Equal(t, tc.expectedConfig.MetricsConfig, c.MetricsConfig) - }) - } -} - -func TestConfigToMap(t *testing.T) { - info := zap.NewAtomicLevelAt(zapcore.InfoLevel) - cases := map[string]struct { - config Config - expectedMap map[string]string - }{ - "simple": { - config: Config{ - LoggerCfg: &zap.Config{ - Level: info, - Development: false, - }, - MetricsConfig: &MetricsConfig{ - BackendDestination: "prometheus", - PrometheusPort: 9092, - ReportingPeriodSeconds: 5, - }, - }, - expectedMap: map[string]string{ - "metrics.backend-destination": "prometheus", - "metrics.prometheus-port": "9092", - "metrics.reporting-period-seconds": "5", - }, - }, - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - m, err := tc.config.ToMap() - require.NoError(t, err, "config to map failed") - require.Equal(t, tc.expectedMap, m) - }) - } -} diff --git a/pkg/brokers/config/observability/poller/poller.go b/pkg/brokers/config/observability/poller/poller.go deleted file mode 100644 index 2b72e023..00000000 --- a/pkg/brokers/config/observability/poller/poller.go +++ /dev/null @@ -1,81 +0,0 @@ -package poller - -import ( - "context" - "fmt" - "path/filepath" - - "go.uber.org/zap" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" - "github.com/zeiss/typhoon/pkg/brokers/config/observability" -) - -type PollerCallback func(*observability.Config) - -type Poller struct { - fsp fs.Poller - path string - logger *zap.SugaredLogger - - config *observability.Config - cbs []PollerCallback -} - -func NewPoller(fsp fs.Poller, path string, logger *zap.SugaredLogger) (*Poller, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return nil, fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - return &Poller{ - fsp: fsp, - path: path, - logger: logger, - }, nil -} - -func (cw *Poller) AddCallback(cb PollerCallback) { - cw.cbs = append(cw.cbs, cb) -} - -func (cw *Poller) GetConfig() *observability.Config { - return cw.config -} - -func (cw *Poller) Start(ctx context.Context) error { - err := cw.fsp.Add(cw.path, cw.update) - if err != nil { - return err - } - - if cfg, err := cw.fsp.GetContent(cw.path); cfg != nil && err == nil { - cw.update(cfg) - } - - cw.fsp.Start(ctx) - return nil -} - -func (cw *Poller) update(content []byte) { - if len(content) == 0 { - // Discard file events that do not inform content. 
- cw.logger.Debug(fmt.Sprintf("Received event with empty contents for %s", cw.path)) - return - } - - cfg, err := observability.Parse(content) - if err != nil { - cw.logger.Errorw(fmt.Sprintf("Contents for %s are not valid", cw.path), zap.Error(err)) - return - } - - cw.config = cfg - for _, cb := range cw.cbs { - cb(cfg) - } -} diff --git a/pkg/brokers/config/observability/watcher/watcher.go b/pkg/brokers/config/observability/watcher/watcher.go deleted file mode 100644 index bc35ce1d..00000000 --- a/pkg/brokers/config/observability/watcher/watcher.go +++ /dev/null @@ -1,83 +0,0 @@ -package config - -import ( - "context" - "fmt" - "path/filepath" - - "go.uber.org/zap" - - "github.com/zeiss/typhoon/pkg/brokers/common/fs" - "github.com/zeiss/typhoon/pkg/brokers/config/observability" -) - -type WatcherCallback func(*observability.Config) - -type Watcher struct { - cfw fs.CachedFileWatcher - path string - logger *zap.SugaredLogger - - config *observability.Config - cbs []WatcherCallback -} - -func NewWatcher(cfw fs.CachedFileWatcher, path string, logger *zap.SugaredLogger) (*Watcher, error) { - absPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("error resolving to absolute path %q: %w", path, err) - } - - if absPath != path { - return nil, fmt.Errorf("configuration path %q needs to be abstolute", path) - } - - return &Watcher{ - cfw: cfw, - path: path, - logger: logger, - }, nil -} - -func (cw *Watcher) AddCallback(cb WatcherCallback) { - cw.cbs = append(cw.cbs, cb) -} - -func (cw *Watcher) GetConfig() *observability.Config { - return cw.config -} - -func (cw *Watcher) Start(ctx context.Context) error { - err := cw.cfw.Add(cw.path, cw.update) - if err != nil { - return err - } - - // Perform a first call to the callback with the contents of the config - // file. Otherwise the callback won't be called until a modification - // occurs. - if cfg, err := cw.cfw.GetContent(cw.path); cfg != nil && err == nil { - cw.update(cfg) - } - - cw.cfw.Start(ctx) - return nil -} - -func (cw *Watcher) update(content []byte) { - if len(content) == 0 { - // Discard file events that do not inform content. 
- return - } - - cfg, err := observability.Parse(content) - if err != nil { - cw.logger.Errorw(fmt.Sprintf("Contents for %s are not valid", cw.path), zap.Error(err)) - return - } - - cw.config = cfg - for _, cb := range cw.cbs { - cb(cfg) - } -} diff --git a/pkg/brokers/ingest/ingest.go b/pkg/brokers/ingest/ingest.go deleted file mode 100644 index 945cb080..00000000 --- a/pkg/brokers/ingest/ingest.go +++ /dev/null @@ -1,168 +0,0 @@ -package ingest - -import ( - "context" - "fmt" - "net/http" - "time" - - obshttp "github.com/cloudevents/sdk-go/observability/opencensus/v2/http" - ceclient "github.com/cloudevents/sdk-go/v2/client" - - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/cloudevents/sdk-go/v2/protocol" - "go.uber.org/zap" - - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" - "github.com/zeiss/typhoon/pkg/brokers/ingest/metrics" - "github.com/zeiss/typhoon/pkg/brokers/status" -) - -type ( - CloudEventHandler func(context.Context, *cloudevents.Event) error - ProbeHandler func() error -) - -type Instance struct { - port int - - ceHandler CloudEventHandler - probeHandler ProbeHandler - - statusManager status.Manager - reporter metrics.Reporter - logger *zap.SugaredLogger -} - -type InstanceOption func(*Instance) - -func NewInstance(reporter metrics.Reporter, logger *zap.SugaredLogger, opts ...InstanceOption) *Instance { - i := &Instance{ - port: 8080, - logger: logger, - reporter: reporter, - } - - for _, opt := range opts { - opt(i) - } - - return i -} - -func InstanceWithPort(port int) InstanceOption { - return func(i *Instance) { - i.port = port - } -} - -func InstanceWithStatusManager(sm status.Manager) InstanceOption { - return func(i *Instance) { - i.statusManager = sm - } -} - -func (i *Instance) Start(ctx context.Context) error { - if i.logger == nil { - panic("logger is nil!") - } - - p, err := obshttp.NewObservedHTTP( - cloudevents.WithPort(i.port), - cloudevents.WithShutdownTimeout(10*time.Second), - cloudevents.WithGetHandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Use common health paths. 
- if r.URL.Path != "/healthz" && r.URL.Path != "/_ah/health" { - w.WriteHeader(http.StatusNotFound) - return - } - - if err := i.probeHandler(); err != nil { - w.WriteHeader(http.StatusInternalServerError) - _, werr := w.Write([]byte(`{"ok":"false", "error":"` + err.Error() + `"}`)) - i.logger.Errorw("Could not write HTTP response (not healthy)", zap.Errors("error", []error{ - werr, err, - })) - return - } - - if _, err := w.Write([]byte(`{"ok": "true"}`)); err != nil { - i.logger.Errorw("Could not write HTTP response (healthy)", zap.Error(err)) - } - }), - ) - if err != nil { - return fmt.Errorf("could not create a CloudEvents HTTP client protocol: %w", err) - } - - c, err := ceclient.New(p, ceclient.WithObservabilityService( - metrics.NewOpenCensusObservabilityService(i.reporter))) - if err != nil { - return fmt.Errorf("failed to create CloudEvents client: %w", err) - } - - i.logger.Infof("Listening on %d", i.port) - var handler interface{} - - if i.statusManager != nil { - // Notify and defer status manager - i.statusManager.UpdateIngestStatus(&status.IngestStatus{ - Status: status.IngestStatusReady, - }) - defer i.statusManager.UpdateIngestStatus(&status.IngestStatus{ - Status: status.IngestStatusClosed, - }) - - handler = i.cloudEventsStatusManagerHandler - } else { - handler = i.cloudEventsHandler - } - - if err := c.StartReceiver(ctx, handler); err != nil { - return fmt.Errorf("unable to start HTTP server: %w", err) - } - - return nil -} - -func (i *Instance) UpdateFromConfig(c *cfgbroker.Config) { - i.logger.Info("Ingest Server UpdateFromConfig ...") -} - -func (i *Instance) RegisterCloudEventHandler(h CloudEventHandler) { - i.ceHandler = h -} - -func (i *Instance) RegisterProbeHandler(h ProbeHandler) { - i.probeHandler = h -} - -func (i *Instance) cloudEventsStatusManagerHandler(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, protocol.Result) { - e, p := i.cloudEventsHandler(ctx, event) - - t := time.Now() - if i.statusManager != nil { - i.statusManager.UpdateIngestStatus(&status.IngestStatus{ - Status: status.IngestStatusRunning, - LastIngested: &t, - }) - } - - return e, p -} - -func (i *Instance) cloudEventsHandler(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, protocol.Result) { - i.logger.Debug(fmt.Sprintf("Received CloudEvent: %v", event.String())) - - if i.ceHandler == nil { - i.logger.Errorw("CloudEvent lost due to no ingest handler configured") - return nil, protocol.ResultNACK - } - - if err := i.ceHandler(ctx, &event); err != nil { - i.logger.Errorw("Could not produce CloudEvent to broker", zap.Error(err)) - return nil, protocol.ResultNACK - } - - return nil, protocol.ResultACK -} diff --git a/pkg/brokers/ingest/metrics/observability_service.go b/pkg/brokers/ingest/metrics/observability_service.go deleted file mode 100644 index 1a263c3a..00000000 --- a/pkg/brokers/ingest/metrics/observability_service.go +++ /dev/null @@ -1,70 +0,0 @@ -package metrics - -import ( - "context" - "time" - - "go.opencensus.io/trace" - - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/cloudevents/sdk-go/v2/binding" - ceclient "github.com/cloudevents/sdk-go/v2/client" - - "github.com/cloudevents/sdk-go/v2/protocol" -) - -type opencensusObservabilityService struct { - reporter Reporter -} - -func NewOpenCensusObservabilityService(r Reporter) ceclient.ObservabilityService { - return opencensusObservabilityService{ - reporter: r, - } -} - -func (o opencensusObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) 
context.Context { - return []func(context.Context, binding.Message) context.Context{tracePropagatorContextDecorator} -} - -func (o opencensusObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) { - o.reporter.ReportNonValidEvent() -} - -func (o opencensusObservabilityService) RecordCallingInvoker(ctx context.Context, event *cloudevents.Event) (context.Context, func(errOrResult error)) { - start := time.Now() - return ctx, func(errOrResult error) { - o.reporter.ReportProcessedEvent( - protocol.IsACK(errOrResult), - event.Type(), - float64(time.Since(start)/time.Millisecond)) - } -} - -func (o opencensusObservabilityService) RecordSendingEvent(ctx context.Context, event cloudevents.Event) (context.Context, func(errOrResult error)) { - // Not used at ingest - return ctx, nil -} - -func (o opencensusObservabilityService) RecordRequestEvent(ctx context.Context, event cloudevents.Event) (context.Context, func(errOrResult error, event *cloudevents.Event)) { - // Not used at ingest - return ctx, nil -} - -func tracePropagatorContextDecorator(ctx context.Context, msg binding.Message) context.Context { - var messageCtx context.Context - if mctx, ok := msg.(binding.MessageContext); ok { - messageCtx = mctx.Context() - } else if mctx, ok := binding.UnwrapMessage(msg).(binding.MessageContext); ok { - messageCtx = mctx.Context() - } - - if messageCtx == nil { - return ctx - } - span := trace.FromContext(messageCtx) - if span == nil { - return ctx - } - return trace.NewContext(ctx, span) -} diff --git a/pkg/brokers/ingest/metrics/reporter.go b/pkg/brokers/ingest/metrics/reporter.go deleted file mode 100644 index 6c286e64..00000000 --- a/pkg/brokers/ingest/metrics/reporter.go +++ /dev/null @@ -1,131 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - "strconv" - "sync" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - knmetrics "knative.dev/pkg/metrics" - - "github.com/zeiss/typhoon/pkg/brokers/common/metrics" -) - -const ( - LabelIngested = "ingested" -) - -var ( - ingestedEventKey = tag.MustNewKey(LabelIngested) - - // eventCountM is a counter which records the number of events received - // by the Broker. - eventCountM = stats.Int64( - "ingest/event_count", - "Number of events received by a Broker ingestion.", - stats.UnitDimensionless, - ) - - // latencyMs measures the latency in milliseconds for the CloudEvents - // client methods. - latencyMs = stats.Float64( - "ingest/event_latency", - "The latency in milliseconds for the Broker CloudEvents ingestion.", - "ms") - - // rejectedCountM is a counter which records the number of requests that - // could not be processed as events. - rejectedCountM = stats.Int64( - "ingest/rejected_count", - "Number of requests rejected by the Broker ingestion.", - stats.UnitDimensionless, - ) -) - -func registerStatViews() error { - tagKeys := []tag.Key{ - metrics.ReceivedEventTypeKey, - ingestedEventKey, - } - - // Create view to see our measurements. 
- return knmetrics.RegisterResourceView( - &view.View{ - Name: latencyMs.Name(), - Description: latencyMs.Description(), - Measure: latencyMs, - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: tagKeys, - }, - &view.View{ - Name: eventCountM.Name(), - Description: eventCountM.Description(), - Measure: eventCountM, - Aggregation: view.Count(), - TagKeys: tagKeys, - }, - &view.View{ - Name: rejectedCountM.Name(), - Description: rejectedCountM.Description(), - Measure: rejectedCountM, - Aggregation: view.Count(), - TagKeys: []tag.Key{}, - }, - ) -} - -type Reporter interface { - ReportProcessedEvent(ingested bool, eventType string, msLatency float64) - ReportNonValidEvent() -} - -// Reporter holds cached metric objects to report ingress metrics. -type reporter struct { - ctx context.Context - logger *zap.SugaredLogger -} - -var once sync.Once - -// NewReporter retuns a StatReporter for ingested events. -func NewReporter(ctx context.Context) (Reporter, error) { - r := &reporter{} - - var err error - once.Do(func() { - if err = registerStatViews(); err != nil { - err = fmt.Errorf("error registering OpenCensus stats view: %w", err) - return - } - }) - - if err != nil { - return nil, err - } - - r.ctx = ctx - - return r, nil -} - -func (r *reporter) ReportProcessedEvent(ingested bool, eventType string, msLatency float64) { - ctx, err := tag.New(r.ctx, - tag.Insert(metrics.ReceivedEventTypeKey, eventType), - tag.Insert(ingestedEventKey, strconv.FormatBool(ingested)), - ) - if err != nil { - r.logger.Errorw("error setting tags to OpenCensus context", zap.Error(err)) - } - - knmetrics.Record(ctx, latencyMs.M(msLatency)) - knmetrics.Record(ctx, eventCountM.M(1)) -} - -func (r *reporter) ReportNonValidEvent() { - knmetrics.Record(r.ctx, rejectedCountM.M(1)) -} diff --git a/pkg/brokers/status/status.go b/pkg/brokers/status/status.go deleted file mode 100644 index 4ab70279..00000000 --- a/pkg/brokers/status/status.go +++ /dev/null @@ -1,230 +0,0 @@ -package status - -import ( - "context" - "sync" - "time" - - "go.uber.org/zap" -) - -type SubscriptionStatusChoice string - -const ( - // The subscription has been created and is able to process events. - SubscriptionStatusReady SubscriptionStatusChoice = "Ready" - // The subscription has started processing events. - SubscriptionStatusRunning SubscriptionStatusChoice = "Running" - // The subscription could not be created. - SubscriptionStatusFailed SubscriptionStatusChoice = "Failed" - // The subscription will not receive further events and can be deleted. - SubscriptionStatusComplete SubscriptionStatusChoice = "Complete" -) - -type IngestStatusChoice string - -const ( - // The ingest has been created and is able to receive events. - IngestStatusReady IngestStatusChoice = "Ready" - // The ingest has started receiving events. - IngestStatusRunning IngestStatusChoice = "Running" - // The ingest has been closed. - IngestStatusClosed IngestStatusChoice = "Closed" -) - -type Backend interface { - UpdateStatus(ctx context.Context, s *Status) error -} - -type Manager interface { - RegisterBackendStatusWriters(b Backend) - Start(ctx context.Context) - - UpdateIngestStatus(is *IngestStatus) - EnsureSubscription(name string, ss *SubscriptionStatus) - EnsureNoSubscription(name string) -} - -type manager struct { - // Cached structure for the status that avoids trying - // rewrites when there are no status changes. - // - // The cached structure will be considered stale after some - // configurable duration. 
- // - // lastStatusWrite checkpoints the last time the ConfigMap was written, and will be - // combined with cacheExpiration to calculate cache expiration - cached *Status - cacheExpiration time.Duration - lastStatusWrite time.Time - - // The status manager will run reconciling cycles according to the resyncPeriod duration. - // If the status cache has expired, the backend's update will be triggered. - // - // writeAsap is a flag set when a status change must be written at the next - // reconciliation, no matter if the cached status is stale or not. - // - // A reconcile cycle can be explicitly run using the chReconcile channel. - resyncPeriod time.Duration - writeAsap bool - chReconcile chan struct{} - - // registered status reconciler channels - statusBackends []Backend - - log *zap.SugaredLogger - m sync.Mutex -} - -func NewManager(cacheExpiration time.Duration, resyncPeriod time.Duration, log *zap.SugaredLogger) Manager { - return &manager{ - cached: &Status{Subscriptions: make(map[string]*SubscriptionStatus)}, - cacheExpiration: cacheExpiration, - - resyncPeriod: resyncPeriod, - writeAsap: true, - chReconcile: make(chan struct{}), - - statusBackends: []Backend{}, - - log: log, - m: sync.Mutex{}, - } -} - -func (m *manager) Start(ctx context.Context) { - ticker := time.NewTicker(m.resyncPeriod) - defer ticker.Stop() - - for { - select { - case <-m.chReconcile: - // fall out of select block - case <-ticker.C: - // fall out of select block - case <-ctx.Done(): - return - } - - // Skip if there are no pending writes and the - // cache is not stale - if !m.writeAsap && m.lastStatusWrite.Add(m.cacheExpiration).After(time.Now()) { - continue - } - - m.updateStatus(ctx) - } -} - -func (m *manager) updateStatus(ctx context.Context) { - m.m.Lock() - defer m.m.Unlock() - - // Touch last updated before sending to all backends. - m.cached.LastUpdated = time.Now() - - // Iterate all registered status backends and call then. - failed := false - for i := range m.statusBackends { - if err := m.statusBackends[i].UpdateStatus(ctx, m.cached); err != nil { - m.log.Errorw("Failed updating the status", zap.Error(err)) - if failed { - failed = true - } - } - } - - if failed { - // If the status update failed, raise the flag to force - // write at the next cycle. - m.writeAsap = true - } else { - // If all backends succeeded unset the writeAsap flag - // and set the last status timestamp - m.writeAsap = false - m.lastStatusWrite = time.Now() - } -} - -func (m *manager) RegisterBackendStatusWriters(b Backend) { - m.m.Lock() - defer m.m.Unlock() - - m.statusBackends = append(m.statusBackends, b) -} - -func (m *manager) UpdateIngestStatus(is *IngestStatus) { - m.m.Lock() - defer m.m.Unlock() - - if m.cached.Ingest.EqualStatus(is) { - // If status is equal do not enqueue an update. - return - } - - // If status differs from existing, update it at the structure. - m.cached.Ingest = *is - - // This update is not a priority, overwrite the ingest element and - // let a different status update (like the status cache expired) - // to writte it to the ConfigMap - if m.cached.Ingest.EqualSoftStatus(is) { - return - } - - // This update must be written asap. Mark the flag and send the signal - m.writeAsap = true - - m.maybeEnqueueReconcile() -} - -func (m *manager) EnsureSubscription(name string, ss *SubscriptionStatus) { - m.m.Lock() - defer m.m.Unlock() - - s, ok := m.cached.Subscriptions[name] - - // Fill not informed values with existing. 
- ss.Merge(s) - - switch { - case ok && s.EqualStatus(ss): - // If status is equal do not enqueue an update. - - case ok && s.EqualSoftStatus(ss): - m.cached.Subscriptions[name] = ss - // This update is not a priority, overwrite the ingest element and - // let a different status update (like the status cache expired) - // to update the status. - - default: - // Either a new subscription or an update that needs - // to be written asap - - m.cached.Subscriptions[name] = ss - - m.writeAsap = true - m.maybeEnqueueReconcile() - } -} - -func (m *manager) EnsureNoSubscription(name string) { - m.m.Lock() - defer m.m.Unlock() - - if _, ok := m.cached.Subscriptions[name]; ok { - delete(m.cached.Subscriptions, name) - m.writeAsap = true - m.maybeEnqueueReconcile() - } -} - -func (m *manager) maybeEnqueueReconcile() { - select { - case m.chReconcile <- struct{}{}: - // Try to send but if busy skip - - default: - m.log.Debugw("Skipping status reconciliation due to full queue") - } -} diff --git a/pkg/brokers/status/types.go b/pkg/brokers/status/types.go deleted file mode 100644 index 3e59b3df..00000000 --- a/pkg/brokers/status/types.go +++ /dev/null @@ -1,176 +0,0 @@ -package status - -import ( - "time" -) - -// Status of a broker instance. -type Status struct { - // More information on Duration format: - // - https://www.iso.org/iso-8601-date-and-time-format.html - // - https://en.wikipedia.org/wiki/ISO_8601 - LastUpdated time.Time `json:"lastUpdated,omitempty"` - Ingest IngestStatus `json:"ingest,omitempty"` - Subscriptions map[string]*SubscriptionStatus `json:"subscriptions,omitempty"` -} - -// EqualSoftStatus compares two status instances core event data, not taking into account -// timestamps at each structure level. -// -// This function is not thread safe, it is up to the caller to make sure structures are -// not concurrently modified. -func (s *Status) EqualSoftStatus(in *Status) bool { - // If ingest not equal, return false. - if !s.Ingest.EqualSoftStatus(&in.Ingest) { - return false - } - - // If subscriptions have been added or deleted, return false. - // - // The case where the number match but the contents are not equal - // is covered in the next block below. - if len(s.Subscriptions) != len(in.Subscriptions) { - return false - } - - // Iterate all subscriptions and return false on any inequality. - for k := range s.Subscriptions { - - // If subscription not found at incoming status. - ins, ok := in.Subscriptions[k] - if !ok { - return false - } - - // If subscription found at incoming status, but soft equal - // of their contents is not true. - if !s.Subscriptions[k].EqualSoftStatus(ins) { - return false - } - } - - return true -} - -// EqualSoftStatus compares verbatim two status instances. -// -// This function is not thread safe, it is up to the caller to make sure structures are -// not concurrently modified. -func (s *Status) EqualStatus(in *Status) bool { - // If ingest not equal, return false. - if !s.Ingest.EqualStatus(&in.Ingest) { - return false - } - - // If subscriptions have been added or deleted, return false. - // - // The case where the number match but the contents are not equal - // is covered in the next block below. - if len(s.Subscriptions) != len(in.Subscriptions) { - return false - } - - // Iterate all subscriptions and return false on any inequality. - for k := range s.Subscriptions { - - // If subscription not found at incoming status. 
- ins, ok := in.Subscriptions[k] - if !ok { - return false - } - - // If subscription found at incoming status, but equal - // of their contents is not true. - if !s.Subscriptions[k].EqualStatus(ins) { - return false - } - } - - return s.LastUpdated == in.LastUpdated -} - -type IngestStatus struct { - Status IngestStatusChoice `json:"status"` - Message *string `json:"message,omitempty"` - - // LastIngested event into the broker. - LastIngested *time.Time `json:"lastIngested,omitempty"` -} - -func (is *IngestStatus) EqualSoftStatus(in *IngestStatus) bool { - if is.Message == nil && in.Message != nil || - is.Message != nil && in.Message == nil || - (is.Message != nil && in.Message != nil && *is.Message != *in.Message) { - return false - } - - return is.Status == in.Status -} - -func (is *IngestStatus) EqualStatus(in *IngestStatus) bool { - if !is.EqualSoftStatus(in) { - return false - } - - if is.LastIngested == nil && in.LastIngested != nil || - is.LastIngested != nil && in.LastIngested == nil || - (is.LastIngested != nil && in.LastIngested != nil && *is.LastIngested != *in.LastIngested) { - return false - } - - return true -} - -type SubscriptionStatus struct { - Status SubscriptionStatusChoice `json:"status"` - Message *string `json:"message,omitempty"` - - LastProcessed *time.Time `json:"lastProcessed,omitempty"` -} - -func (ss *SubscriptionStatus) Merge(in *SubscriptionStatus) { - if in == nil { - return - } - if ss == nil { - *ss = *in - return - } - - if ss.LastProcessed == nil && in.LastProcessed != nil { - ss.LastProcessed = in.LastProcessed - } - - if ss.Status == "" && in.Status != "" { - ss.Status = in.Status - } - - // Message is merged only if the status does not change - if ss.Message == nil && in.Message != nil && ss.Status == in.Status { - ss.Message = in.Message - } -} - -func (ss *SubscriptionStatus) EqualSoftStatus(in *SubscriptionStatus) bool { - if ss.Message == nil && in.Message != nil || - ss.Message != nil && in.Message == nil || - (ss.Message != nil && in.Message != nil && *ss.Message != *in.Message) { - return false - } - - return ss.Status == in.Status -} - -func (ss *SubscriptionStatus) EqualStatus(in *SubscriptionStatus) bool { - if !ss.EqualSoftStatus(in) { - return false - } - - if ss.LastProcessed == nil && in.LastProcessed != nil || - ss.LastProcessed != nil && in.LastProcessed == nil || - (ss.LastProcessed != nil && in.LastProcessed != nil && *ss.LastProcessed != *in.LastProcessed) { - return false - } - - return true -} diff --git a/pkg/brokers/subscriptions/manager.go b/pkg/brokers/subscriptions/manager.go deleted file mode 100644 index 546166fe..00000000 --- a/pkg/brokers/subscriptions/manager.go +++ /dev/null @@ -1,148 +0,0 @@ -package subscriptions - -import ( - "context" - "fmt" - "reflect" - "sync" - - obshttp "github.com/cloudevents/sdk-go/observability/opencensus/v2/http" - ceclient "github.com/cloudevents/sdk-go/v2/client" - "go.uber.org/zap" - - "knative.dev/pkg/logging" - - "github.com/zeiss/typhoon/pkg/brokers/backend" - cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker" - "github.com/zeiss/typhoon/pkg/brokers/status" - "github.com/zeiss/typhoon/pkg/brokers/subscriptions/metrics" -) - -type Subscription struct { - Trigger cfgbroker.Trigger -} - -type Manager struct { - logger *zap.SugaredLogger - - backend backend.Interface - statusManager status.Manager - - // Subscribers map indexed by name - subscribers map[string]*subscriber - - ctx context.Context - m sync.RWMutex -} - -func New(inctx context.Context, logger *zap.SugaredLogger, 
be backend.Interface, statusManager status.Manager) (*Manager, error) { - // Needed for Knative filters - ctx := logging.WithLogger(inctx, logger) - - return &Manager{ - backend: be, - subscribers: make(map[string]*subscriber), - logger: logger, - statusManager: statusManager, - ctx: ctx, - }, nil -} - -func (m *Manager) UpdateFromConfig(c *cfgbroker.Config) { - m.logger.Info("Updating subscriptions configuration") - m.m.Lock() - defer m.m.Unlock() - - for name, sub := range m.subscribers { - if _, ok := c.Triggers[name]; !ok { - m.logger.Infow("Deleting subscription", zap.String("name", name)) - sub.unsubscribe() - delete(m.subscribers, name) - - if m.statusManager != nil { - m.statusManager.EnsureNoSubscription(name) - } - } - } - - for name, trigger := range c.Triggers { - s, ok := m.subscribers[name] - if !ok { - s, err := m.createSubscriber(name, trigger) - if err != nil { - msg := "Failed to create trigger subscription" - m.logger.Errorw(msg, zap.String("trigger", name), zap.Error(err)) - if m.statusManager != nil { - m.statusManager.EnsureSubscription(name, &status.SubscriptionStatus{ - Status: status.SubscriptionStatusFailed, - Message: &msg, - }) - } - - continue - } - - if m.statusManager != nil { - // Initial state is Ready, it changes to Running when - // the first event is processed. - m.statusManager.EnsureSubscription(name, &status.SubscriptionStatus{ - Status: status.SubscriptionStatusReady, - }) - } - - m.subscribers[name] = s - m.logger.Infow("Subscription for trigger updated", zap.String("name", name)) - continue - } - - if reflect.DeepEqual(s.trigger, trigger) { - // If there are no changes to the subscription, skip. - continue - } - - // Update existing subscription with new data. - m.logger.Infow("Updating subscription upon trigger configuration", zap.String("name", name), zap.Any("trigger", trigger)) - if err := s.updateTrigger(trigger); err != nil { - m.logger.Errorw("Could not setup trigger", zap.String("name", name), zap.Error(err)) - return - } - } -} - -func (m *Manager) createSubscriber(name string, trigger cfgbroker.Trigger) (*subscriber, error) { - // Create CloudEvents client with reporter for Trigger. 
- ir, err := metrics.NewReporter(m.ctx, name) - if err != nil { - return nil, fmt.Errorf("failed to setup trigger stats reporter: %w", err) - } - - p, err := obshttp.NewObservedHTTP() - if err != nil { - return nil, fmt.Errorf("could not create CloudEvents HTTP protocol: %w", err) - } - - ceClient, err := ceclient.New(p, ceclient.WithObservabilityService(metrics.NewOpenCensusObservabilityService(ir))) - if err != nil { - return nil, fmt.Errorf("could not create CloudEvents HTTP client: %w", err) - } - - s := &subscriber{ - name: name, - backend: m.backend, - statusManager: m.statusManager, - ceClient: ceClient, - parentCtx: m.ctx, - logger: m.logger, - } - - m.logger.Infow("Creating new subscription from trigger configuration", zap.String("name", name), zap.Any("trigger", trigger)) - if err := s.updateTrigger(trigger); err != nil { - return nil, fmt.Errorf("could not setup trigger: %w", err) - } - - if err := m.backend.Subscribe(name, trigger.Bounds, s.dispatchCloudEvent, s.statusChange); err != nil { - return nil, fmt.Errorf("could not create subscription for trigger: %w", err) - } - - return s, nil -} diff --git a/pkg/brokers/subscriptions/metrics/observability_service.go b/pkg/brokers/subscriptions/metrics/observability_service.go deleted file mode 100644 index d0d8422f..00000000 --- a/pkg/brokers/subscriptions/metrics/observability_service.go +++ /dev/null @@ -1,85 +0,0 @@ -package metrics - -import ( - "context" - "time" - - "go.opencensus.io/trace" - - occlient "github.com/cloudevents/sdk-go/observability/opencensus/v2/client" - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/cloudevents/sdk-go/v2/binding" - ceclient "github.com/cloudevents/sdk-go/v2/client" - "github.com/cloudevents/sdk-go/v2/observability" - - "github.com/cloudevents/sdk-go/v2/protocol" -) - -type opencensusObservabilityService struct { - reporter Reporter -} - -func NewOpenCensusObservabilityService(r Reporter) ceclient.ObservabilityService { - return opencensusObservabilityService{ - reporter: r, - } -} - -func (o opencensusObservabilityService) InboundContextDecorators() []func(context.Context, binding.Message) context.Context { - return []func(context.Context, binding.Message) context.Context{tracePropagatorContextDecorator} -} - -func (o opencensusObservabilityService) RecordReceivedMalformedEvent(ctx context.Context, err error) { - // Not used at trigger -} - -func (o opencensusObservabilityService) RecordCallingInvoker(ctx context.Context, event *cloudevents.Event) (context.Context, func(errOrResult error)) { - // Not used at trigger - return ctx, nil -} - -func (o opencensusObservabilityService) RecordSendingEvent(ctx context.Context, event cloudevents.Event) (context.Context, func(errOrResult error)) { - // Not used at trigger - return ctx, nil -} - -func (o opencensusObservabilityService) RecordRequestEvent(ctx context.Context, sentEvent cloudevents.Event) (context.Context, func(errOrResult error, event *cloudevents.Event)) { - start := time.Now() - ctx, span := trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindClient)) - if span.IsRecordingEvents() { - span.AddAttributes(occlient.EventTraceAttributes(&sentEvent)...) 
- } - - return ctx, func(errOrResult error, receivedEvent *cloudevents.Event) { - span.End() - - receivedType := "" - if receivedEvent != nil { - receivedType = receivedEvent.Type() - } - - o.reporter.ReportTriggeredEvent( - protocol.IsACK(errOrResult), - sentEvent.Type(), - receivedType, - float64(time.Since(start)/time.Millisecond)) - } -} - -func tracePropagatorContextDecorator(ctx context.Context, msg binding.Message) context.Context { - var messageCtx context.Context - if mctx, ok := msg.(binding.MessageContext); ok { - messageCtx = mctx.Context() - } else if mctx, ok := binding.UnwrapMessage(msg).(binding.MessageContext); ok { - messageCtx = mctx.Context() - } - - if messageCtx == nil { - return ctx - } - span := trace.FromContext(messageCtx) - if span == nil { - return ctx - } - return trace.NewContext(ctx, span) -} diff --git a/pkg/brokers/subscriptions/metrics/reporter.go b/pkg/brokers/subscriptions/metrics/reporter.go deleted file mode 100644 index fc720651..00000000 --- a/pkg/brokers/subscriptions/metrics/reporter.go +++ /dev/null @@ -1,123 +0,0 @@ -package metrics - -import ( - "context" - "fmt" - "strconv" - "sync" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.uber.org/zap" - - knmetrics "knative.dev/pkg/metrics" - - "github.com/zeiss/typhoon/pkg/brokers/common/metrics" -) - -const ( - LabelDelivered = "delivered" - LabelSentEventType = "sent_type" - LabelTrigger = "trigger_name" -) - -var ( - sentEventTypeKey = tag.MustNewKey(LabelSentEventType) - deliveredEventKey = tag.MustNewKey(LabelDelivered) - triggerKey = tag.MustNewKey(LabelTrigger) - - // eventCountM is a counter which records the number of events received - // by the Broker. - eventCountM = stats.Int64( - "trigger/event_count", - "Number of events sent via Trigger subscription.", - stats.UnitDimensionless, - ) - - // latencyMs measures the latency in milliseconds for the CloudEvents - // client methods. - latencyMs = stats.Float64( - "trigger/event_latency", - "The latency in milliseconds for the broker Trigger subscriptions.", - "ms") -) - -func registerStatViews() error { - tagKeys := []tag.Key{ - triggerKey, - sentEventTypeKey, - deliveredEventKey, - } - - // Create view to see our measurements. - return knmetrics.RegisterResourceView( - &view.View{ - Name: latencyMs.Name(), - Description: latencyMs.Description(), - Measure: latencyMs, - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: append(tagKeys, metrics.ReceivedEventTypeKey), - }, - &view.View{ - Name: eventCountM.Name(), - Description: eventCountM.Description(), - Measure: eventCountM, - Aggregation: view.Count(), - TagKeys: tagKeys, - }, - ) -} - -func initContext(ctx context.Context, triggerName string) (context.Context, error) { - return tag.New(ctx, tag.Insert(triggerKey, triggerName)) -} - -type Reporter interface { - ReportTriggeredEvent(delivered bool, sentType, receivedType string, msLatency float64) -} - -// Reporter holds cached metric objects to report ingress metrics. -type reporter struct { - ctx context.Context - logger *zap.SugaredLogger -} - -var once sync.Once - -// NewReporter retuns a StatReporter for ingested events. 
-func NewReporter(context context.Context, trigger string) (Reporter, error) {
-	r := &reporter{}
-
-	var err error
-	once.Do(func() {
-		if err = registerStatViews(); err != nil {
-			err = fmt.Errorf("error registering OpenCensus stats view: %w", err)
-			return
-		}
-	})
-
-	if err != nil {
-		return nil, err
-	}
-
-	r.ctx, err = initContext(context, trigger)
-	if err != nil {
-		return nil, fmt.Errorf("error initializing OpenCensus context with tags: %w", err)
-	}
-
-	return r, nil
-}
-
-func (r *reporter) ReportTriggeredEvent(delivered bool, sentType, receivedType string, msLatency float64) {
-	ctx, err := tag.New(r.ctx,
-		tag.Insert(sentEventTypeKey, sentType),
-		tag.Insert(deliveredEventKey, strconv.FormatBool(delivered)),
-	)
-	if err != nil {
-		r.logger.Errorw("error setting tags to OpenCensus context", zap.Error(err))
-	}
-
-	knmetrics.Record(ctx, latencyMs.M(msLatency), stats.WithTags(tag.Insert(metrics.ReceivedEventTypeKey, receivedType)))
-	knmetrics.Record(ctx, eventCountM.M(1))
-}
diff --git a/pkg/brokers/subscriptions/subscriber.go b/pkg/brokers/subscriptions/subscriber.go
deleted file mode 100644
index 180e0e85..00000000
--- a/pkg/brokers/subscriptions/subscriber.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package subscriptions
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"time"
-
-	cloudevents "github.com/cloudevents/sdk-go/v2"
-	"github.com/rickb777/date/period"
-	"go.uber.org/zap"
-
-	"knative.dev/eventing/pkg/eventfilter"
-	"knative.dev/eventing/pkg/eventfilter/subscriptionsapi"
-	"knative.dev/pkg/logging"
-
-	"github.com/zeiss/typhoon/pkg/brokers/backend"
-	cfgbroker "github.com/zeiss/typhoon/pkg/brokers/config/broker"
-	"github.com/zeiss/typhoon/pkg/brokers/status"
-)
-
-type subscriber struct {
-	trigger cfgbroker.Trigger
-
-	name          string
-	backend       backend.Interface
-	statusManager status.Manager
-	ceClient      cloudevents.Client
-
-	// We need both the parent context used to build the subscriber and the
-	// local context used to send CloudEvents, which contains the target and
-	// delivery options.
-	// The local context is re-created from the parent context every time the
-	// trigger changes.
-	parentCtx context.Context
-	ctx       context.Context
-
-	logger *zap.SugaredLogger
-	m      sync.RWMutex
-}
-
-func (s *subscriber) unsubscribe() {
-	s.backend.Unsubscribe(s.name)
-}
-
-func (s *subscriber) updateTrigger(trigger cfgbroker.Trigger) error {
-	// The target URL may be provided as empty to support temporary
-	// unavailability.
-	url := ""
-	if trigger.Target.URL != nil {
-		url = *trigger.Target.URL
-	}
-	ctx := cloudevents.ContextWithTarget(s.parentCtx, url)
-
-	// HACK: temporary, to make the move of the delivery options smooth;
-	// remove the method and access the field when the structure is
-	// completely migrated to having the delivery options at the root.
-	if do := trigger.GetDeliveryOptions(); do != nil &&
-		do.Retry != nil &&
-		*do.Retry >= 1 &&
-		do.BackoffPolicy != nil {
-
-		delay, err := period.Parse(*do.BackoffDelay)
-		if err != nil {
-			return fmt.Errorf("could not apply trigger %q configuration due to backoff delay parsing: %w", s.name, err)
-		}
-
-		switch *do.BackoffPolicy {
-		case cfgbroker.BackoffPolicyLinear:
-			ctx = cloudevents.ContextWithRetriesLinearBackoff(
-				ctx, delay.DurationApprox(), int(*do.Retry))
-
-		case cfgbroker.BackoffPolicyExponential:
-			ctx = cloudevents.ContextWithRetriesExponentialBackoff(
-				ctx, delay.DurationApprox(), int(*do.Retry))
-
-		default:
-			ctx = cloudevents.ContextWithRetriesConstantBackoff(
-				ctx, delay.DurationApprox(), int(*do.Retry))
-		}
-	}
-
-	s.m.Lock()
-	defer s.m.Unlock()
-
-	s.trigger = trigger
-	s.ctx = ctx
-
-	return nil
-}
-
-func (s *subscriber) dispatchCloudEvent(event *cloudevents.Event) {
-	s.m.RLock()
-	defer s.m.RUnlock()
-
-	if s.statusManager != nil {
-		defer func() {
-			t := time.Now()
-			s.statusChange(&status.SubscriptionStatus{
-				Status:        status.SubscriptionStatusRunning,
-				LastProcessed: &t,
-			})
-		}()
-	}
-
-	res := subscriptionsapi.NewAllFilter(materializeFiltersList(s.ctx, s.trigger.Filters)...).Filter(s.ctx, *event)
-	if res == eventfilter.FailFilter {
-		s.logger.Debugw("Skipped delivery due to filter", zap.Any("event", *event))
-		return
-	}
-
-	// Only try to send if the target URL has been configured. When it is not
-	// configured, try to send to the dead letter sink.
-	url := cloudevents.TargetFromContext(s.ctx)
-	if url != nil && s.send(s.ctx, event) {
-		return
-	}
-
-	// If the event could not be sent (including retries), check for a DLS
-	// and send if it is configured.
-	if do := s.trigger.GetDeliveryOptions(); do != nil && do.DeadLetterURL != nil && *do.DeadLetterURL != "" {
-		dlsCtx := cloudevents.ContextWithTarget(s.parentCtx, *do.DeadLetterURL)
-		if s.send(dlsCtx, event) {
-			return
-		}
-	}
-
-	// If the event could not be sent either to the target or the DLS, just write a log entry.
-	// Set the attribute `lost: true` to help log aggregators identify lost events by querying.
-	msg := "Event was lost"
-	if url != nil {
-		msg += " while sending to " + url.String()
-	}
-	s.logger.Errorw(msg, zap.Bool("lost", true),
-		zap.String("type", event.Type()), zap.String("source", event.Source()), zap.String("id", event.ID()))
-}
-
-func (s *subscriber) statusChange(ss *status.SubscriptionStatus) {
-	if s.statusManager != nil {
-		s.statusManager.EnsureSubscription(s.name, ss)
-	}
-}
-
-func (s *subscriber) send(ctx context.Context, event *cloudevents.Event) bool {
-	res, result := s.ceClient.Request(ctx, *event)
-
-	switch {
-	case cloudevents.IsACK(result):
-		if res != nil {
-			if err := s.backend.Produce(ctx, res); err != nil {
-				s.logger.Errorw(fmt.Sprintf("Failed to consume response from %s",
-					cloudevents.TargetFromContext(ctx).String()),
-					zap.Error(err), zap.String("type", res.Type()), zap.String("source", res.Source()), zap.String("id", res.ID()))
-
-				// Not ingesting the response is considered an error.
-				// TODO make this configurable.
-				return false
-			}
-		}
-		return true
-
-	case cloudevents.IsUndelivered(result):
-		s.logger.Errorw(fmt.Sprintf("Failed to send event to %s",
-			cloudevents.TargetFromContext(ctx).String()),
-			zap.Error(result), zap.String("type", event.Type()), zap.String("source", event.Source()), zap.String("id", event.ID()))
-		return false
-
-	case cloudevents.IsNACK(result):
-		s.logger.Errorw(fmt.Sprintf("Event not accepted at %s",
-			cloudevents.TargetFromContext(ctx).String()),
-			zap.Error(result), zap.String("type", event.Type()), zap.String("source", event.Source()), zap.String("id", event.ID()))
-		return false
-	}
-
-	s.logger.Errorw(fmt.Sprintf("Unknown event send outcome at %s",
-		cloudevents.TargetFromContext(ctx).String()),
-		zap.Error(result), zap.String("type", event.Type()), zap.String("source", event.Source()), zap.String("id", event.ID()))
-	return false
-}
-
-func materializeFiltersList(ctx context.Context, filters []cfgbroker.Filter) []eventfilter.Filter {
-	materializedFilters := make([]eventfilter.Filter, 0, len(filters))
-	for _, f := range filters {
-		mf := materializeSubscriptionsAPIFilter(ctx, f)
-		if mf == nil {
-			logging.FromContext(ctx).Warnw("Failed to parse filter. Skipping filter.", zap.Any("filter", f))
-			continue
-		}
-		materializedFilters = append(materializedFilters, mf)
-	}
-	return materializedFilters
-}
-
-func materializeSubscriptionsAPIFilter(ctx context.Context, filter cfgbroker.Filter) eventfilter.Filter {
-	var materializedFilter eventfilter.Filter
-	var err error
-	switch {
-	case len(filter.Exact) > 0:
-		// The webhook validates that this map has only a single key:value pair.
-		materializedFilter, err = subscriptionsapi.NewExactFilter("type", filter.Exact["type"])
-		if err != nil {
-			logging.FromContext(ctx).Debugw("Invalid exact expression", zap.Any("filters", filter.Exact), zap.Error(err))
-			return nil
-		}
-	case len(filter.Prefix) > 0:
-		// The webhook validates that this map has only a single key:value pair.
-		materializedFilter, err = subscriptionsapi.NewPrefixFilter("type", filter.Prefix["type"])
-		if err != nil {
-			logging.FromContext(ctx).Debugw("Invalid prefix expression", zap.Any("filters", filter.Prefix), zap.Error(err))
-			return nil
-		}
-	case len(filter.Suffix) > 0:
-		// The webhook validates that this map has only a single key:value pair.
-		materializedFilter, err = subscriptionsapi.NewSuffixFilter("type", filter.Suffix["type"])
-		if err != nil {
-			logging.FromContext(ctx).Debugw("Invalid suffix expression", zap.Any("filters", filter.Suffix), zap.Error(err))
-			return nil
-		}
-	case len(filter.All) > 0:
-		materializedFilter = subscriptionsapi.NewAllFilter(materializeFiltersList(ctx, filter.All)...)
-	case len(filter.Any) > 0:
-		materializedFilter = subscriptionsapi.NewAnyFilter(materializeFiltersList(ctx, filter.Any)...)
- case filter.Not != nil: - materializedFilter = subscriptionsapi.NewNotFilter(materializeSubscriptionsAPIFilter(ctx, *filter.Not)) - } - return materializedFilter -} diff --git a/pkg/client/generated/clientset/internalclientset/clientset.go b/pkg/client/generated/clientset/internalclientset/clientset.go index c6d220aa..c0739790 100644 --- a/pkg/client/generated/clientset/internalclientset/clientset.go +++ b/pkg/client/generated/clientset/internalclientset/clientset.go @@ -6,7 +6,6 @@ import ( "fmt" "net/http" - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1" extensionsv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/extensions/v1alpha1" flowv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/flow/v1alpha1" routingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/routing/v1alpha1" @@ -19,7 +18,6 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface - EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface ExtensionsV1alpha1() extensionsv1alpha1.ExtensionsV1alpha1Interface FlowV1alpha1() flowv1alpha1.FlowV1alpha1Interface RoutingV1alpha1() routingv1alpha1.RoutingV1alpha1Interface @@ -30,7 +28,6 @@ type Interface interface { // Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient - eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client extensionsV1alpha1 *extensionsv1alpha1.ExtensionsV1alpha1Client flowV1alpha1 *flowv1alpha1.FlowV1alpha1Client routingV1alpha1 *routingv1alpha1.RoutingV1alpha1Client @@ -38,11 +35,6 @@ type Clientset struct { targetsV1alpha1 *targetsv1alpha1.TargetsV1alpha1Client } -// EventingV1alpha1 retrieves the EventingV1alpha1Client -func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface { - return c.eventingV1alpha1 -} - // ExtensionsV1alpha1 retrieves the ExtensionsV1alpha1Client func (c *Clientset) ExtensionsV1alpha1() extensionsv1alpha1.ExtensionsV1alpha1Interface { return c.extensionsV1alpha1 @@ -112,10 +104,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, var cs Clientset var err error - cs.eventingV1alpha1, err = eventingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.extensionsV1alpha1, err = extensionsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -157,7 +145,6 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset - cs.eventingV1alpha1 = eventingv1alpha1.New(c) cs.extensionsV1alpha1 = extensionsv1alpha1.New(c) cs.flowV1alpha1 = flowv1alpha1.New(c) cs.routingV1alpha1 = routingv1alpha1.New(c) diff --git a/pkg/client/generated/clientset/internalclientset/fake/clientset_generated.go b/pkg/client/generated/clientset/internalclientset/fake/clientset_generated.go index 8c817b8f..6175860b 100644 --- a/pkg/client/generated/clientset/internalclientset/fake/clientset_generated.go +++ b/pkg/client/generated/clientset/internalclientset/fake/clientset_generated.go @@ -4,8 +4,6 @@ package fake import ( clientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1" - fakeeventingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake" extensionsv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/extensions/v1alpha1" fakeextensionsv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/extensions/v1alpha1/fake" flowv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/flow/v1alpha1" @@ -73,11 +71,6 @@ var ( _ testing.FakeClient = &Clientset{} ) -// EventingV1alpha1 retrieves the EventingV1alpha1Client -func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface { - return &fakeeventingv1alpha1.FakeEventingV1alpha1{Fake: &c.Fake} -} - // ExtensionsV1alpha1 retrieves the ExtensionsV1alpha1Client func (c *Clientset) ExtensionsV1alpha1() extensionsv1alpha1.ExtensionsV1alpha1Interface { return &fakeextensionsv1alpha1.FakeExtensionsV1alpha1{Fake: &c.Fake} diff --git a/pkg/client/generated/clientset/internalclientset/fake/register.go b/pkg/client/generated/clientset/internalclientset/fake/register.go index ce19bad5..8992e2cf 100644 --- a/pkg/client/generated/clientset/internalclientset/fake/register.go +++ b/pkg/client/generated/clientset/internalclientset/fake/register.go @@ -3,7 +3,6 @@ package fake import ( - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" extensionsv1alpha1 "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" flowv1alpha1 "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" routingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/routing/v1alpha1" @@ -20,7 +19,6 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ - eventingv1alpha1.AddToScheme, extensionsv1alpha1.AddToScheme, flowv1alpha1.AddToScheme, routingv1alpha1.AddToScheme, diff --git a/pkg/client/generated/clientset/internalclientset/scheme/register.go b/pkg/client/generated/clientset/internalclientset/scheme/register.go index 651e8b3c..66d52249 100644 --- a/pkg/client/generated/clientset/internalclientset/scheme/register.go +++ b/pkg/client/generated/clientset/internalclientset/scheme/register.go @@ -3,7 +3,6 @@ package scheme import ( - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" extensionsv1alpha1 "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" flowv1alpha1 "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" routingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/routing/v1alpha1" @@ -20,7 +19,6 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var 
localSchemeBuilder = runtime.SchemeBuilder{ - eventingv1alpha1.AddToScheme, extensionsv1alpha1.AddToScheme, flowv1alpha1.AddToScheme, routingv1alpha1.AddToScheme, diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/doc.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/doc.go deleted file mode 100644 index 93a7ca4e..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/eventing_client.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/eventing_client.go deleted file mode 100644 index 8e2ac286..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/eventing_client.go +++ /dev/null @@ -1,96 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/scheme" - rest "k8s.io/client-go/rest" -) - -type EventingV1alpha1Interface interface { - RESTClient() rest.Interface - RedisBrokersGetter - TriggersGetter -} - -// EventingV1alpha1Client is used to interact with features provided by the eventing.typhoon.zeiss.com group. -type EventingV1alpha1Client struct { - restClient rest.Interface -} - -func (c *EventingV1alpha1Client) RedisBrokers(namespace string) RedisBrokerInterface { - return newRedisBrokers(c, namespace) -} - -func (c *EventingV1alpha1Client) Triggers(namespace string) TriggerInterface { - return newTriggers(c, namespace) -} - -// NewForConfig creates a new EventingV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*EventingV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new EventingV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &EventingV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new EventingV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *EventingV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new EventingV1alpha1Client for the given RESTClient. 
-func New(c rest.Interface) *EventingV1alpha1Client { - return &EventingV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *EventingV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/doc.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/doc.go deleted file mode 100644 index 2b5ba4c8..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_eventing_client.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_eventing_client.go deleted file mode 100644 index 3c75ee85..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_eventing_client.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeEventingV1alpha1 struct { - *testing.Fake -} - -func (c *FakeEventingV1alpha1) RedisBrokers(namespace string) v1alpha1.RedisBrokerInterface { - return &FakeRedisBrokers{c, namespace} -} - -func (c *FakeEventingV1alpha1) Triggers(namespace string) v1alpha1.TriggerInterface { - return &FakeTriggers{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeEventingV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_redisbroker.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_redisbroker.go deleted file mode 100644 index 8f7a2d83..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_redisbroker.go +++ /dev/null @@ -1,125 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeRedisBrokers implements RedisBrokerInterface -type FakeRedisBrokers struct { - Fake *FakeEventingV1alpha1 - ns string -} - -var redisbrokersResource = v1alpha1.SchemeGroupVersion.WithResource("redisbrokers") - -var redisbrokersKind = v1alpha1.SchemeGroupVersion.WithKind("RedisBroker") - -// Get takes name of the redisBroker, and returns the corresponding redisBroker object, and an error if there is any. 
-func (c *FakeRedisBrokers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RedisBroker, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(redisbrokersResource, c.ns, name), &v1alpha1.RedisBroker{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RedisBroker), err -} - -// List takes label and field selectors, and returns the list of RedisBrokers that match those selectors. -func (c *FakeRedisBrokers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RedisBrokerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(redisbrokersResource, redisbrokersKind, c.ns, opts), &v1alpha1.RedisBrokerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.RedisBrokerList{ListMeta: obj.(*v1alpha1.RedisBrokerList).ListMeta} - for _, item := range obj.(*v1alpha1.RedisBrokerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested redisBrokers. -func (c *FakeRedisBrokers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(redisbrokersResource, c.ns, opts)) - -} - -// Create takes the representation of a redisBroker and creates it. Returns the server's representation of the redisBroker, and an error, if there is any. -func (c *FakeRedisBrokers) Create(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.CreateOptions) (result *v1alpha1.RedisBroker, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(redisbrokersResource, c.ns, redisBroker), &v1alpha1.RedisBroker{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RedisBroker), err -} - -// Update takes the representation of a redisBroker and updates it. Returns the server's representation of the redisBroker, and an error, if there is any. -func (c *FakeRedisBrokers) Update(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (result *v1alpha1.RedisBroker, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(redisbrokersResource, c.ns, redisBroker), &v1alpha1.RedisBroker{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RedisBroker), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeRedisBrokers) UpdateStatus(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (*v1alpha1.RedisBroker, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(redisbrokersResource, "status", c.ns, redisBroker), &v1alpha1.RedisBroker{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RedisBroker), err -} - -// Delete takes name of the redisBroker and deletes it. Returns an error if one occurs. -func (c *FakeRedisBrokers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(redisbrokersResource, c.ns, name, opts), &v1alpha1.RedisBroker{}) - - return err -} - -// DeleteCollection deletes a collection of objects. 
-func (c *FakeRedisBrokers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(redisbrokersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.RedisBrokerList{}) - return err -} - -// Patch applies the patch and returns the patched redisBroker. -func (c *FakeRedisBrokers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisBroker, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(redisbrokersResource, c.ns, name, pt, data, subresources...), &v1alpha1.RedisBroker{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.RedisBroker), err -} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_trigger.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_trigger.go deleted file mode 100644 index 9e3fe496..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/fake/fake_trigger.go +++ /dev/null @@ -1,125 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeTriggers implements TriggerInterface -type FakeTriggers struct { - Fake *FakeEventingV1alpha1 - ns string -} - -var triggersResource = v1alpha1.SchemeGroupVersion.WithResource("triggers") - -var triggersKind = v1alpha1.SchemeGroupVersion.WithKind("Trigger") - -// Get takes name of the trigger, and returns the corresponding trigger object, and an error if there is any. -func (c *FakeTriggers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Trigger, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(triggersResource, c.ns, name), &v1alpha1.Trigger{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Trigger), err -} - -// List takes label and field selectors, and returns the list of Triggers that match those selectors. -func (c *FakeTriggers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TriggerList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(triggersResource, triggersKind, c.ns, opts), &v1alpha1.TriggerList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.TriggerList{ListMeta: obj.(*v1alpha1.TriggerList).ListMeta} - for _, item := range obj.(*v1alpha1.TriggerList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested triggers. -func (c *FakeTriggers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(triggersResource, c.ns, opts)) - -} - -// Create takes the representation of a trigger and creates it. Returns the server's representation of the trigger, and an error, if there is any. -func (c *FakeTriggers) Create(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.CreateOptions) (result *v1alpha1.Trigger, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(triggersResource, c.ns, trigger), &v1alpha1.Trigger{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Trigger), err -} - -// Update takes the representation of a trigger and updates it. Returns the server's representation of the trigger, and an error, if there is any. -func (c *FakeTriggers) Update(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (result *v1alpha1.Trigger, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(triggersResource, c.ns, trigger), &v1alpha1.Trigger{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Trigger), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTriggers) UpdateStatus(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (*v1alpha1.Trigger, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(triggersResource, "status", c.ns, trigger), &v1alpha1.Trigger{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Trigger), err -} - -// Delete takes name of the trigger and deletes it. Returns an error if one occurs. -func (c *FakeTriggers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(triggersResource, c.ns, name, opts), &v1alpha1.Trigger{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeTriggers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(triggersResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.TriggerList{}) - return err -} - -// Patch applies the patch and returns the patched trigger. -func (c *FakeTriggers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Trigger, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(triggersResource, c.ns, name, pt, data, subresources...), &v1alpha1.Trigger{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Trigger), err -} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/generated_expansion.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/generated_expansion.go deleted file mode 100644 index 88898b08..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,7 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type RedisBrokerExpansion interface{} - -type TriggerExpansion interface{} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/redisbroker.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/redisbroker.go deleted file mode 100644 index 7a92b5fe..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/redisbroker.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - scheme "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// RedisBrokersGetter has a method to return a RedisBrokerInterface. -// A group's client should implement this interface. -type RedisBrokersGetter interface { - RedisBrokers(namespace string) RedisBrokerInterface -} - -// RedisBrokerInterface has methods to work with RedisBroker resources. -type RedisBrokerInterface interface { - Create(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.CreateOptions) (*v1alpha1.RedisBroker, error) - Update(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (*v1alpha1.RedisBroker, error) - UpdateStatus(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (*v1alpha1.RedisBroker, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RedisBroker, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RedisBrokerList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisBroker, err error) - RedisBrokerExpansion -} - -// redisBrokers implements RedisBrokerInterface -type redisBrokers struct { - client rest.Interface - ns string -} - -// newRedisBrokers returns a RedisBrokers -func newRedisBrokers(c *EventingV1alpha1Client, namespace string) *redisBrokers { - return &redisBrokers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the redisBroker, and returns the corresponding redisBroker object, and an error if there is any. -func (c *redisBrokers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RedisBroker, err error) { - result = &v1alpha1.RedisBroker{} - err = c.client.Get(). - Namespace(c.ns). - Resource("redisbrokers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RedisBrokers that match those selectors. -func (c *redisBrokers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RedisBrokerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RedisBrokerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("redisbrokers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested redisBrokers. -func (c *redisBrokers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("redisbrokers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
- Watch(ctx) -} - -// Create takes the representation of a redisBroker and creates it. Returns the server's representation of the redisBroker, and an error, if there is any. -func (c *redisBrokers) Create(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.CreateOptions) (result *v1alpha1.RedisBroker, err error) { - result = &v1alpha1.RedisBroker{} - err = c.client.Post(). - Namespace(c.ns). - Resource("redisbrokers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(redisBroker). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a redisBroker and updates it. Returns the server's representation of the redisBroker, and an error, if there is any. -func (c *redisBrokers) Update(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (result *v1alpha1.RedisBroker, err error) { - result = &v1alpha1.RedisBroker{} - err = c.client.Put(). - Namespace(c.ns). - Resource("redisbrokers"). - Name(redisBroker.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(redisBroker). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *redisBrokers) UpdateStatus(ctx context.Context, redisBroker *v1alpha1.RedisBroker, opts v1.UpdateOptions) (result *v1alpha1.RedisBroker, err error) { - result = &v1alpha1.RedisBroker{} - err = c.client.Put(). - Namespace(c.ns). - Resource("redisbrokers"). - Name(redisBroker.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(redisBroker). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the redisBroker and deletes it. Returns an error if one occurs. -func (c *redisBrokers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("redisbrokers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *redisBrokers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("redisbrokers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched redisBroker. -func (c *redisBrokers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RedisBroker, err error) { - result = &v1alpha1.RedisBroker{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("redisbrokers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/trigger.go b/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/trigger.go deleted file mode 100644 index 54f7907b..00000000 --- a/pkg/client/generated/clientset/internalclientset/typed/eventing/v1alpha1/trigger.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - scheme "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// TriggersGetter has a method to return a TriggerInterface. -// A group's client should implement this interface. -type TriggersGetter interface { - Triggers(namespace string) TriggerInterface -} - -// TriggerInterface has methods to work with Trigger resources. -type TriggerInterface interface { - Create(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.CreateOptions) (*v1alpha1.Trigger, error) - Update(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (*v1alpha1.Trigger, error) - UpdateStatus(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (*v1alpha1.Trigger, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Trigger, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TriggerList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Trigger, err error) - TriggerExpansion -} - -// triggers implements TriggerInterface -type triggers struct { - client rest.Interface - ns string -} - -// newTriggers returns a Triggers -func newTriggers(c *EventingV1alpha1Client, namespace string) *triggers { - return &triggers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the trigger, and returns the corresponding trigger object, and an error if there is any. -func (c *triggers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Trigger, err error) { - result = &v1alpha1.Trigger{} - err = c.client.Get(). - Namespace(c.ns). - Resource("triggers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Triggers that match those selectors. -func (c *triggers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TriggerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.TriggerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("triggers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested triggers. -func (c *triggers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("triggers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a trigger and creates it. Returns the server's representation of the trigger, and an error, if there is any. 
-func (c *triggers) Create(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.CreateOptions) (result *v1alpha1.Trigger, err error) { - result = &v1alpha1.Trigger{} - err = c.client.Post(). - Namespace(c.ns). - Resource("triggers"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(trigger). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a trigger and updates it. Returns the server's representation of the trigger, and an error, if there is any. -func (c *triggers) Update(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (result *v1alpha1.Trigger, err error) { - result = &v1alpha1.Trigger{} - err = c.client.Put(). - Namespace(c.ns). - Resource("triggers"). - Name(trigger.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(trigger). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *triggers) UpdateStatus(ctx context.Context, trigger *v1alpha1.Trigger, opts v1.UpdateOptions) (result *v1alpha1.Trigger, err error) { - result = &v1alpha1.Trigger{} - err = c.client.Put(). - Namespace(c.ns). - Resource("triggers"). - Name(trigger.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(trigger). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the trigger and deletes it. Returns an error if one occurs. -func (c *triggers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("triggers"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *triggers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("triggers"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched trigger. -func (c *triggers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Trigger, err error) { - result = &v1alpha1.Trigger{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("triggers"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/generated/informers/externalversions/eventing/interface.go b/pkg/client/generated/informers/externalversions/eventing/interface.go deleted file mode 100644 index 0fbc2d9f..00000000 --- a/pkg/client/generated/informers/externalversions/eventing/interface.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. - -package eventing - -import ( - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1" - internalinterfaces "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1alpha1 provides access to shared informers for resources in V1alpha1. 
- V1alpha1() v1alpha1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.Interface. -func (g *group) V1alpha1() v1alpha1.Interface { - return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/interface.go b/pkg/client/generated/informers/externalversions/eventing/v1alpha1/interface.go deleted file mode 100644 index e3c4118b..00000000 --- a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/interface.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - internalinterfaces "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // RedisBrokers returns a RedisBrokerInformer. - RedisBrokers() RedisBrokerInformer - // Triggers returns a TriggerInformer. - Triggers() TriggerInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// RedisBrokers returns a RedisBrokerInformer. -func (v *version) RedisBrokers() RedisBrokerInformer { - return &redisBrokerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// Triggers returns a TriggerInformer. -func (v *version) Triggers() TriggerInformer { - return &triggerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/redisbroker.go b/pkg/client/generated/informers/externalversions/eventing/v1alpha1/redisbroker.go deleted file mode 100644 index 41ea3d4d..00000000 --- a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/redisbroker.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - internalclientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - internalinterfaces "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// RedisBrokerInformer provides access to a shared informer and lister for -// RedisBrokers. 
-type RedisBrokerInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.RedisBrokerLister -} - -type redisBrokerInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewRedisBrokerInformer constructs a new informer for RedisBroker type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewRedisBrokerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredRedisBrokerInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredRedisBrokerInformer constructs a new informer for RedisBroker type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredRedisBrokerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.EventingV1alpha1().RedisBrokers(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.EventingV1alpha1().RedisBrokers(namespace).Watch(context.TODO(), options) - }, - }, - &eventingv1alpha1.RedisBroker{}, - resyncPeriod, - indexers, - ) -} - -func (f *redisBrokerInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredRedisBrokerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *redisBrokerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventingv1alpha1.RedisBroker{}, f.defaultInformer) -} - -func (f *redisBrokerInformer) Lister() v1alpha1.RedisBrokerLister { - return v1alpha1.NewRedisBrokerLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/trigger.go b/pkg/client/generated/informers/externalversions/eventing/v1alpha1/trigger.go deleted file mode 100644 index ae169713..00000000 --- a/pkg/client/generated/informers/externalversions/eventing/v1alpha1/trigger.go +++ /dev/null @@ -1,74 +0,0 @@ -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - time "time" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - internalclientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - internalinterfaces "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// TriggerInformer provides access to a shared informer and lister for -// Triggers. -type TriggerInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.TriggerLister -} - -type triggerInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewTriggerInformer constructs a new informer for Trigger type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewTriggerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredTriggerInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredTriggerInformer constructs a new informer for Trigger type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredTriggerInformer(client internalclientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.EventingV1alpha1().Triggers(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.EventingV1alpha1().Triggers(namespace).Watch(context.TODO(), options) - }, - }, - &eventingv1alpha1.Trigger{}, - resyncPeriod, - indexers, - ) -} - -func (f *triggerInformer) defaultInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredTriggerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *triggerInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&eventingv1alpha1.Trigger{}, f.defaultInformer) -} - -func (f *triggerInformer) Lister() v1alpha1.TriggerLister { - return v1alpha1.NewTriggerLister(f.Informer().GetIndexer()) -} diff --git a/pkg/client/generated/informers/externalversions/factory.go b/pkg/client/generated/informers/externalversions/factory.go index 676c21e5..d7daa606 100644 --- a/pkg/client/generated/informers/externalversions/factory.go +++ b/pkg/client/generated/informers/externalversions/factory.go @@ -8,7 +8,6 @@ import ( time "time" internalclientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - eventing 
"github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing" extensions "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/extensions" flow "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/flow" internalinterfaces "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/internalinterfaces" @@ -242,7 +241,6 @@ type SharedInformerFactory interface { // client. InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer - Eventing() eventing.Interface Extensions() extensions.Interface Flow() flow.Interface Routing() routing.Interface @@ -250,10 +248,6 @@ type SharedInformerFactory interface { Targets() targets.Interface } -func (f *sharedInformerFactory) Eventing() eventing.Interface { - return eventing.New(f, f.namespace, f.tweakListOptions) -} - func (f *sharedInformerFactory) Extensions() extensions.Interface { return extensions.New(f, f.namespace, f.tweakListOptions) } diff --git a/pkg/client/generated/informers/externalversions/generic.go b/pkg/client/generated/informers/externalversions/generic.go index 73b881f5..e49a2dad 100644 --- a/pkg/client/generated/informers/externalversions/generic.go +++ b/pkg/client/generated/informers/externalversions/generic.go @@ -5,8 +5,7 @@ package externalversions import ( "fmt" - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - extensionsv1alpha1 "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" + v1alpha1 "github.com/zeiss/typhoon/pkg/apis/extensions/v1alpha1" flowv1alpha1 "github.com/zeiss/typhoon/pkg/apis/flow/v1alpha1" routingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/routing/v1alpha1" sourcesv1alpha1 "github.com/zeiss/typhoon/pkg/apis/sources/v1alpha1" @@ -41,14 +40,8 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=eventing.typhoon.zeiss.com, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("redisbrokers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().RedisBrokers().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("triggers"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().Triggers().Informer()}, nil - - // Group=extensions.typhoon.zeiss.com, Version=v1alpha1 - case extensionsv1alpha1.SchemeGroupVersion.WithResource("functions"): + // Group=extensions.typhoon.zeiss.com, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("functions"): return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1alpha1().Functions().Informer()}, nil // Group=flow.typhoon.zeiss.com, Version=v1alpha1 diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/fake/fake.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/fake/fake.go deleted file mode 100644 index 4bc3a36e..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/fake/fake.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. 
- -package fake - -import ( - context "context" - - redisbroker "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker" - fake "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/fake" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" -) - -var Get = redisbroker.Get - -func init() { - injection.Fake.RegisterInformer(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := fake.Get(ctx) - inf := f.Eventing().V1alpha1().RedisBrokers() - return context.WithValue(ctx, redisbroker.Key{}, inf), inf.Informer() -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/fake/fake.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/fake/fake.go deleted file mode 100644 index e2805a82..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/fake/fake.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package fake - -import ( - context "context" - - filtered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered" - factoryfiltered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/filtered" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -var Get = filtered.Get - -func init() { - injection.Fake.RegisterFilteredInformers(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, []controller.Informer) { - untyped := ctx.Value(factoryfiltered.LabelKey{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch labelkey from context.") - } - labelSelectors := untyped.([]string) - infs := []controller.Informer{} - for _, selector := range labelSelectors { - f := factoryfiltered.Get(ctx, selector) - inf := f.Eventing().V1alpha1().RedisBrokers() - ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) - infs = append(infs, inf.Informer()) - } - return ctx, infs -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/redisbroker.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/redisbroker.go deleted file mode 100644 index 8e6cb398..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/filtered/redisbroker.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package filtered - -import ( - context "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1" - filtered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/filtered" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterFilteredInformers(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. 
-type Key struct { - Selector string -} - -func withInformer(ctx context.Context) (context.Context, []controller.Informer) { - untyped := ctx.Value(filtered.LabelKey{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch labelkey from context.") - } - labelSelectors := untyped.([]string) - infs := []controller.Informer{} - for _, selector := range labelSelectors { - f := filtered.Get(ctx, selector) - inf := f.Eventing().V1alpha1().RedisBrokers() - ctx = context.WithValue(ctx, Key{Selector: selector}, inf) - infs = append(infs, inf.Informer()) - } - return ctx, infs -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context, selector string) v1alpha1.RedisBrokerInformer { - untyped := ctx.Value(Key{Selector: selector}) - if untyped == nil { - logging.FromContext(ctx).Panicf( - "Unable to fetch github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1.RedisBrokerInformer with selector %s from context.", selector) - } - return untyped.(v1alpha1.RedisBrokerInformer) -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/redisbroker.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/redisbroker.go deleted file mode 100644 index cb36811d..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker/redisbroker.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package redisbroker - -import ( - context "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1" - factory "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterInformer(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. -type Key struct{} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := factory.Get(ctx) - inf := f.Eventing().V1alpha1().RedisBrokers() - return context.WithValue(ctx, Key{}, inf), inf.Informer() -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context) v1alpha1.RedisBrokerInformer { - untyped := ctx.Value(Key{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1.RedisBrokerInformer from context.") - } - return untyped.(v1alpha1.RedisBrokerInformer) -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/fake/fake.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/fake/fake.go deleted file mode 100644 index 284f2b36..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/fake/fake.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. 
- -package fake - -import ( - context "context" - - trigger "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger" - fake "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/fake" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" -) - -var Get = trigger.Get - -func init() { - injection.Fake.RegisterInformer(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := fake.Get(ctx) - inf := f.Eventing().V1alpha1().Triggers() - return context.WithValue(ctx, trigger.Key{}, inf), inf.Informer() -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/fake/fake.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/fake/fake.go deleted file mode 100644 index a840d591..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/fake/fake.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package fake - -import ( - context "context" - - filtered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered" - factoryfiltered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/filtered" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -var Get = filtered.Get - -func init() { - injection.Fake.RegisterFilteredInformers(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, []controller.Informer) { - untyped := ctx.Value(factoryfiltered.LabelKey{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch labelkey from context.") - } - labelSelectors := untyped.([]string) - infs := []controller.Informer{} - for _, selector := range labelSelectors { - f := factoryfiltered.Get(ctx, selector) - inf := f.Eventing().V1alpha1().Triggers() - ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf) - infs = append(infs, inf.Informer()) - } - return ctx, infs -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/trigger.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/trigger.go deleted file mode 100644 index 4bced49b..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/filtered/trigger.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package filtered - -import ( - context "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1" - filtered "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory/filtered" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterFilteredInformers(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. 
-type Key struct { - Selector string -} - -func withInformer(ctx context.Context) (context.Context, []controller.Informer) { - untyped := ctx.Value(filtered.LabelKey{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch labelkey from context.") - } - labelSelectors := untyped.([]string) - infs := []controller.Informer{} - for _, selector := range labelSelectors { - f := filtered.Get(ctx, selector) - inf := f.Eventing().V1alpha1().Triggers() - ctx = context.WithValue(ctx, Key{Selector: selector}, inf) - infs = append(infs, inf.Informer()) - } - return ctx, infs -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context, selector string) v1alpha1.TriggerInformer { - untyped := ctx.Value(Key{Selector: selector}) - if untyped == nil { - logging.FromContext(ctx).Panicf( - "Unable to fetch github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1.TriggerInformer with selector %s from context.", selector) - } - return untyped.(v1alpha1.TriggerInformer) -} diff --git a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/trigger.go b/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/trigger.go deleted file mode 100644 index 0fa3aee3..00000000 --- a/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger/trigger.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package trigger - -import ( - context "context" - - v1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1" - factory "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/factory" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterInformer(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. -type Key struct{} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := factory.Get(ctx) - inf := f.Eventing().V1alpha1().Triggers() - return context.WithValue(ctx, Key{}, inf), inf.Informer() -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context) v1alpha1.TriggerInformer { - untyped := ctx.Value(Key{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch github.com/zeiss/typhoon/pkg/client/generated/informers/externalversions/eventing/v1alpha1.TriggerInformer from context.") - } - return untyped.(v1alpha1.TriggerInformer) -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/controller.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/controller.go deleted file mode 100644 index c978c5bb..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/controller.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. 
- -package redisbroker - -import ( - context "context" - fmt "fmt" - reflect "reflect" - strings "strings" - - internalclientsetscheme "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/scheme" - client "github.com/zeiss/typhoon/pkg/client/generated/injection/client" - redisbroker "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker" - zap "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" - record "k8s.io/client-go/tools/record" - kubeclient "knative.dev/pkg/client/injection/kube/client" - controller "knative.dev/pkg/controller" - logging "knative.dev/pkg/logging" - logkey "knative.dev/pkg/logging/logkey" - reconciler "knative.dev/pkg/reconciler" -) - -const ( - defaultControllerAgentName = "redisbroker-controller" - defaultFinalizerName = "redisbrokers.eventing.typhoon.zeiss.com" -) - -// NewImpl returns a controller.Impl that handles queuing and feeding work from -// the queue through an implementation of controller.Reconciler, delegating to -// the provided Interface and optional Finalizer methods. OptionsFn is used to return -// controller.ControllerOptions to be used by the internal reconciler. -func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { - logger := logging.FromContext(ctx) - - // Check the options function input. It should be 0 or 1. - if len(optionsFns) > 1 { - logger.Fatal("Up to one options function is supported, found: ", len(optionsFns)) - } - - redisbrokerInformer := redisbroker.Get(ctx) - - lister := redisbrokerInformer.Lister() - - var promoteFilterFunc func(obj interface{}) bool - var promoteFunc = func(bkt reconciler.Bucket) {} - - rec := &reconcilerImpl{ - LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ - PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { - - // Signal promotion event - promoteFunc(bkt) - - all, err := lister.List(labels.Everything()) - if err != nil { - return err - } - for _, elt := range all { - if promoteFilterFunc != nil { - if ok := promoteFilterFunc(elt); !ok { - continue - } - } - enq(bkt, types.NamespacedName{ - Namespace: elt.GetNamespace(), - Name: elt.GetName(), - }) - } - return nil - }, - }, - Client: client.Get(ctx), - Lister: lister, - reconciler: r, - finalizerName: defaultFinalizerName, - } - - ctrType := reflect.TypeOf(r).Elem() - ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name()) - ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".") - - logger = logger.With( - zap.String(logkey.ControllerType, ctrTypeName), - zap.String(logkey.Kind, "eventing.typhoon.zeiss.com.RedisBroker"), - ) - - impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger}) - agentName := defaultControllerAgentName - - // Pass impl to the options. Save any optional results. 
- for _, fn := range optionsFns { - opts := fn(impl) - if opts.ConfigStore != nil { - rec.configStore = opts.ConfigStore - } - if opts.FinalizerName != "" { - rec.finalizerName = opts.FinalizerName - } - if opts.AgentName != "" { - agentName = opts.AgentName - } - if opts.SkipStatusUpdates { - rec.skipStatusUpdates = true - } - if opts.DemoteFunc != nil { - rec.DemoteFunc = opts.DemoteFunc - } - if opts.PromoteFilterFunc != nil { - promoteFilterFunc = opts.PromoteFilterFunc - } - if opts.PromoteFunc != nil { - promoteFunc = opts.PromoteFunc - } - } - - rec.Recorder = createRecorder(ctx, agentName) - - return impl -} - -func createRecorder(ctx context.Context, agentName string) record.EventRecorder { - logger := logging.FromContext(ctx) - - recorder := controller.GetEventRecorder(ctx) - if recorder == nil { - // Create event broadcaster - logger.Debug("Creating event broadcaster") - eventBroadcaster := record.NewBroadcaster() - watches := []watch.Interface{ - eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), - eventBroadcaster.StartRecordingToSink( - &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), - } - recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) - go func() { - <-ctx.Done() - for _, w := range watches { - w.Stop() - } - }() - } - - return recorder -} - -func init() { - internalclientsetscheme.AddToScheme(scheme.Scheme) -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/reconciler.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/reconciler.go deleted file mode 100644 index b2e33785..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/reconciler.go +++ /dev/null @@ -1,424 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package redisbroker - -import ( - context "context" - json "encoding/json" - fmt "fmt" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - internalclientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1" - zap "go.uber.org/zap" - "go.uber.org/zap/zapcore" - v1 "k8s.io/api/core/v1" - equality "k8s.io/apimachinery/pkg/api/equality" - errors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - sets "k8s.io/apimachinery/pkg/util/sets" - record "k8s.io/client-go/tools/record" - controller "knative.dev/pkg/controller" - kmp "knative.dev/pkg/kmp" - logging "knative.dev/pkg/logging" - reconciler "knative.dev/pkg/reconciler" -) - -// Interface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1alpha1.RedisBroker. -type Interface interface { - // ReconcileKind implements custom logic to reconcile v1alpha1.RedisBroker. Any changes - // to the objects .Status or .Finalizers will be propagated to the stored - // object. It is recommended that implementors do not call any update calls - // for the Kind inside of ReconcileKind, it is the responsibility of the calling - // controller to propagate those properties. The resource passed to ReconcileKind - // will always have an empty deletion timestamp. 
- ReconcileKind(ctx context.Context, o *v1alpha1.RedisBroker) reconciler.Event -} - -// Finalizer defines the strongly typed interfaces to be implemented by a -// controller finalizing v1alpha1.RedisBroker. -type Finalizer interface { - // FinalizeKind implements custom logic to finalize v1alpha1.RedisBroker. Any changes - // to the objects .Status or .Finalizers will be ignored. Returning a nil or - // Normal type reconciler.Event will allow the finalizer to be deleted on - // the resource. The resource passed to FinalizeKind will always have a set - // deletion timestamp. - FinalizeKind(ctx context.Context, o *v1alpha1.RedisBroker) reconciler.Event -} - -// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1alpha1.RedisBroker if they want to process resources for which -// they are not the leader. -type ReadOnlyInterface interface { - // ObserveKind implements logic to observe v1alpha1.RedisBroker. - // This method should not write to the API. - ObserveKind(ctx context.Context, o *v1alpha1.RedisBroker) reconciler.Event -} - -type doReconcile func(ctx context.Context, o *v1alpha1.RedisBroker) reconciler.Event - -// reconcilerImpl implements controller.Reconciler for v1alpha1.RedisBroker resources. -type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. - reconciler.LeaderAwareFuncs - - // Client is used to write back status updates. - Client internalclientset.Interface - - // Listers index properties about resources. - Lister eventingv1alpha1.RedisBrokerLister - - // Recorder is an event recorder for recording Event resources to the - // Kubernetes API. - Recorder record.EventRecorder - - // configStore allows for decorating a context with config maps. - // +optional - configStore reconciler.ConfigStore - - // reconciler is the implementation of the business logic of the resource. - reconciler Interface - - // finalizerName is the name of the finalizer to reconcile. - finalizerName string - - // skipStatusUpdates configures whether or not this reconciler automatically updates - // the status of the reconciled resource. - skipStatusUpdates bool -} - -// Check that our Reconciler implements controller.Reconciler. -var _ controller.Reconciler = (*reconcilerImpl)(nil) - -// Check that our generated Reconciler is always LeaderAware. -var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) - -func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client internalclientset.Interface, lister eventingv1alpha1.RedisBrokerLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { - // Check the options function input. It should be 0 or 1. - if len(options) > 1 { - logger.Fatal("Up to one options struct is supported, found: ", len(options)) - } - - // Fail fast when users inadvertently implement the other LeaderAware interface. - // For the typed reconcilers, Promote shouldn't take any arguments. - if _, ok := r.(reconciler.LeaderAware); ok { - logger.Fatalf("%T implements the incorrect LeaderAware interface. 
Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) - } - - rec := &reconcilerImpl{ - LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ - PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { - all, err := lister.List(labels.Everything()) - if err != nil { - return err - } - for _, elt := range all { - // TODO: Consider letting users specify a filter in options. - enq(bkt, types.NamespacedName{ - Namespace: elt.GetNamespace(), - Name: elt.GetName(), - }) - } - return nil - }, - }, - Client: client, - Lister: lister, - Recorder: recorder, - reconciler: r, - finalizerName: defaultFinalizerName, - } - - for _, opts := range options { - if opts.ConfigStore != nil { - rec.configStore = opts.ConfigStore - } - if opts.FinalizerName != "" { - rec.finalizerName = opts.FinalizerName - } - if opts.SkipStatusUpdates { - rec.skipStatusUpdates = true - } - if opts.DemoteFunc != nil { - rec.DemoteFunc = opts.DemoteFunc - } - } - - return rec -} - -// Reconcile implements controller.Reconciler -func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { - logger := logging.FromContext(ctx) - - // Initialize the reconciler state. This will convert the namespace/name - // string into a distinct namespace and name, determine if this instance of - // the reconciler is the leader, and any additional interfaces implemented - // by the reconciler. Returns an error is the resource key is invalid. - s, err := newState(key, r) - if err != nil { - logger.Error("Invalid resource key: ", key) - return nil - } - - // If we are not the leader, and we don't implement either ReadOnly - // observer interfaces, then take a fast-path out. - if s.isNotLeaderNorObserver() { - return controller.NewSkipKey(key) - } - - // If configStore is set, attach the frozen configuration to the context. - if r.configStore != nil { - ctx = r.configStore.ToContext(ctx) - } - - // Add the recorder to context. - ctx = controller.WithEventRecorder(ctx, r.Recorder) - - // Get the resource with this namespace/name. - - getter := r.Lister.RedisBrokers(s.namespace) - - original, err := getter.Get(s.name) - - if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing and call - // the ObserveDeletion handler if appropriate. - logger.Debugf("Resource %q no longer exists", key) - if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { - return del.ObserveDeletion(ctx, types.NamespacedName{ - Namespace: s.namespace, - Name: s.name, - }) - } - return nil - } else if err != nil { - return err - } - - // Don't modify the informers copy. - resource := original.DeepCopy() - - var reconcileEvent reconciler.Event - - name, do := s.reconcileMethodFor(resource) - // Append the target method to the logger. - logger = logger.With(zap.String("targetMethod", name)) - switch name { - case reconciler.DoReconcileKind: - // Set and update the finalizer on resource if r.reconciler - // implements Finalizer. - if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { - return fmt.Errorf("failed to set finalizers: %w", err) - } - - if !r.skipStatusUpdates { - reconciler.PreProcessReconcile(ctx, resource) - } - - // Reconcile this copy of the resource and then write back any status - // updates regardless of whether the reconciliation errored out. 
- reconcileEvent = do(ctx, resource) - - if !r.skipStatusUpdates { - reconciler.PostProcessReconcile(ctx, resource, original) - } - - case reconciler.DoFinalizeKind: - // For finalizing reconcilers, if this resource being marked for deletion - // and reconciled cleanly (nil or normal event), remove the finalizer. - reconcileEvent = do(ctx, resource) - - if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { - return fmt.Errorf("failed to clear finalizers: %w", err) - } - - case reconciler.DoObserveKind: - // Observe any changes to this resource, since we are not the leader. - reconcileEvent = do(ctx, resource) - - } - - // Synchronize the status. - switch { - case r.skipStatusUpdates: - // This reconciler implementation is configured to skip resource updates. - // This may mean this reconciler does not observe spec, but reconciles external changes. - case equality.Semantic.DeepEqual(original.Status, resource.Status): - // If we didn't change anything then don't call updateStatus. - // This is important because the copy we loaded from the injectionInformer's - // cache may be stale and we don't want to overwrite a prior update - // to status with this stale state. - case !s.isLeader: - // High-availability reconcilers may have many replicas watching the resource, but only - // the elected leader is expected to write modifications. - logger.Warn("Saw status changes when we aren't the leader!") - default: - if err = r.updateStatus(ctx, logger, original, resource); err != nil { - logger.Warnw("Failed to update resource status", zap.Error(err)) - r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", - "Failed to update status for %q: %v", resource.Name, err) - return err - } - } - - // Report the reconciler event, if any. - if reconcileEvent != nil { - var event *reconciler.ReconcilerEvent - if reconciler.EventAs(reconcileEvent, &event) { - logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) - - // the event was wrapped inside an error, consider the reconciliation as failed - if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { - return reconcileEvent - } - return nil - } - - if controller.IsSkipKey(reconcileEvent) { - // This is a wrapped error, don't emit an event. - } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { - // This is a wrapped error, don't emit an event. - } else { - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) - } - return reconcileEvent - } - - return nil -} - -func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1alpha1.RedisBroker, desired *v1alpha1.RedisBroker) error { - existing = existing.DeepCopy() - return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { - // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. - if attempts > 0 { - - getter := r.Client.EventingV1alpha1().RedisBrokers(desired.Namespace) - - existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) - if err != nil { - return err - } - } - - // If there's nothing to update, just return. 
- if equality.Semantic.DeepEqual(existing.Status, desired.Status) { - return nil - } - - if logger.Desugar().Core().Enabled(zapcore.DebugLevel) { - if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { - logger.Debug("Updating status with: ", diff) - } - } - - existing.Status = desired.Status - - updater := r.Client.EventingV1alpha1().RedisBrokers(existing.Namespace) - - _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) - return err - }) -} - -// updateFinalizersFiltered will update the Finalizers of the resource. -// TODO: this method could be generic and sync all finalizers. For now it only -// updates defaultFinalizerName or its override. -func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.RedisBroker, desiredFinalizers sets.Set[string]) (*v1alpha1.RedisBroker, error) { - // Don't modify the informers copy. - existing := resource.DeepCopy() - - var finalizers []string - - // If there's nothing to update, just return. - existingFinalizers := sets.New[string](existing.Finalizers...) - - if desiredFinalizers.Has(r.finalizerName) { - if existingFinalizers.Has(r.finalizerName) { - // Nothing to do. - return resource, nil - } - // Add the finalizer. - finalizers = append(existing.Finalizers, r.finalizerName) - } else { - if !existingFinalizers.Has(r.finalizerName) { - // Nothing to do. - return resource, nil - } - // Remove the finalizer. - existingFinalizers.Delete(r.finalizerName) - finalizers = sets.List(existingFinalizers) - } - - mergePatch := map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": finalizers, - "resourceVersion": existing.ResourceVersion, - }, - } - - patch, err := json.Marshal(mergePatch) - if err != nil { - return resource, err - } - - patcher := r.Client.EventingV1alpha1().RedisBrokers(resource.Namespace) - - resourceName := resource.Name - updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", - "Failed to update finalizers for %q: %v", resourceName, err) - } else { - r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", - "Updated %q finalizers", resource.GetName()) - } - return updated, err -} - -func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.RedisBroker) (*v1alpha1.RedisBroker, error) { - if _, ok := r.reconciler.(Finalizer); !ok { - return resource, nil - } - - finalizers := sets.New[string](resource.Finalizers...) - - // If this resource is not being deleted, mark the finalizer. - if resource.GetDeletionTimestamp().IsZero() { - finalizers.Insert(r.finalizerName) - } - - // Synchronize the finalizers filtered by r.finalizerName. - return r.updateFinalizersFiltered(ctx, resource, finalizers) -} - -func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.RedisBroker, reconcileEvent reconciler.Event) (*v1alpha1.RedisBroker, error) { - if _, ok := r.reconciler.(Finalizer); !ok { - return resource, nil - } - if resource.GetDeletionTimestamp().IsZero() { - return resource, nil - } - - finalizers := sets.New[string](resource.Finalizers...) 
- - if reconcileEvent != nil { - var event *reconciler.ReconcilerEvent - if reconciler.EventAs(reconcileEvent, &event) { - if event.EventType == v1.EventTypeNormal { - finalizers.Delete(r.finalizerName) - } - } - } else { - finalizers.Delete(r.finalizerName) - } - - // Synchronize the finalizers filtered by r.finalizerName. - return r.updateFinalizersFiltered(ctx, resource, finalizers) -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/state.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/state.go deleted file mode 100644 index ca001e7b..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker/state.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package redisbroker - -import ( - fmt "fmt" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - types "k8s.io/apimachinery/pkg/types" - cache "k8s.io/client-go/tools/cache" - reconciler "knative.dev/pkg/reconciler" -) - -// state is used to track the state of a reconciler in a single run. -type state struct { - // key is the original reconciliation key from the queue. - key string - // namespace is the namespace split from the reconciliation key. - namespace string - // name is the name split from the reconciliation key. - name string - // reconciler is the reconciler. - reconciler Interface - // roi is the read only interface cast of the reconciler. - roi ReadOnlyInterface - // isROI (Read Only Interface) the reconciler only observes reconciliation. - isROI bool - // isLeader the instance of the reconciler is the elected leader. - isLeader bool -} - -func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name. - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return nil, fmt.Errorf("invalid resource key: %s", key) - } - - roi, isROI := r.reconciler.(ReadOnlyInterface) - - isLeader := r.IsLeaderFor(types.NamespacedName{ - Namespace: namespace, - Name: name, - }) - - return &state{ - key: key, - namespace: namespace, - name: name, - reconciler: r.reconciler, - roi: roi, - isROI: isROI, - isLeader: isLeader, - }, nil -} - -// isNotLeaderNorObserver checks to see if this reconciler with the current -// state is enabled to do any work or not. -// isNotLeaderNorObserver returns true when there is no work possible for the -// reconciler. -func (s *state) isNotLeaderNorObserver() bool { - if !s.isLeader && !s.isROI { - // If we are not the leader, and we don't implement the ReadOnly - // interface, then take a fast-path out. - return true - } - return false -} - -func (s *state) reconcileMethodFor(o *v1alpha1.RedisBroker) (string, doReconcile) { - if o.GetDeletionTimestamp().IsZero() { - if s.isLeader { - return reconciler.DoReconcileKind, s.reconciler.ReconcileKind - } else if s.isROI { - return reconciler.DoObserveKind, s.roi.ObserveKind - } - } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok { - return reconciler.DoFinalizeKind, fin.FinalizeKind - } - return "unknown", nil -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/controller.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/controller.go deleted file mode 100644 index 2371077f..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/controller.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. 
- -package trigger - -import ( - context "context" - fmt "fmt" - reflect "reflect" - strings "strings" - - internalclientsetscheme "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset/scheme" - client "github.com/zeiss/typhoon/pkg/client/generated/injection/client" - trigger "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger" - zap "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" - record "k8s.io/client-go/tools/record" - kubeclient "knative.dev/pkg/client/injection/kube/client" - controller "knative.dev/pkg/controller" - logging "knative.dev/pkg/logging" - logkey "knative.dev/pkg/logging/logkey" - reconciler "knative.dev/pkg/reconciler" -) - -const ( - defaultControllerAgentName = "trigger-controller" - defaultFinalizerName = "triggers.eventing.typhoon.zeiss.com" -) - -// NewImpl returns a controller.Impl that handles queuing and feeding work from -// the queue through an implementation of controller.Reconciler, delegating to -// the provided Interface and optional Finalizer methods. OptionsFn is used to return -// controller.ControllerOptions to be used by the internal reconciler. -func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl { - logger := logging.FromContext(ctx) - - // Check the options function input. It should be 0 or 1. - if len(optionsFns) > 1 { - logger.Fatal("Up to one options function is supported, found: ", len(optionsFns)) - } - - triggerInformer := trigger.Get(ctx) - - lister := triggerInformer.Lister() - - var promoteFilterFunc func(obj interface{}) bool - var promoteFunc = func(bkt reconciler.Bucket) {} - - rec := &reconcilerImpl{ - LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ - PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { - - // Signal promotion event - promoteFunc(bkt) - - all, err := lister.List(labels.Everything()) - if err != nil { - return err - } - for _, elt := range all { - if promoteFilterFunc != nil { - if ok := promoteFilterFunc(elt); !ok { - continue - } - } - enq(bkt, types.NamespacedName{ - Namespace: elt.GetNamespace(), - Name: elt.GetName(), - }) - } - return nil - }, - }, - Client: client.Get(ctx), - Lister: lister, - reconciler: r, - finalizerName: defaultFinalizerName, - } - - ctrType := reflect.TypeOf(r).Elem() - ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name()) - ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".") - - logger = logger.With( - zap.String(logkey.ControllerType, ctrTypeName), - zap.String(logkey.Kind, "eventing.typhoon.zeiss.com.Trigger"), - ) - - impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger}) - agentName := defaultControllerAgentName - - // Pass impl to the options. Save any optional results. 
- for _, fn := range optionsFns { - opts := fn(impl) - if opts.ConfigStore != nil { - rec.configStore = opts.ConfigStore - } - if opts.FinalizerName != "" { - rec.finalizerName = opts.FinalizerName - } - if opts.AgentName != "" { - agentName = opts.AgentName - } - if opts.SkipStatusUpdates { - rec.skipStatusUpdates = true - } - if opts.DemoteFunc != nil { - rec.DemoteFunc = opts.DemoteFunc - } - if opts.PromoteFilterFunc != nil { - promoteFilterFunc = opts.PromoteFilterFunc - } - if opts.PromoteFunc != nil { - promoteFunc = opts.PromoteFunc - } - } - - rec.Recorder = createRecorder(ctx, agentName) - - return impl -} - -func createRecorder(ctx context.Context, agentName string) record.EventRecorder { - logger := logging.FromContext(ctx) - - recorder := controller.GetEventRecorder(ctx) - if recorder == nil { - // Create event broadcaster - logger.Debug("Creating event broadcaster") - eventBroadcaster := record.NewBroadcaster() - watches := []watch.Interface{ - eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof), - eventBroadcaster.StartRecordingToSink( - &v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}), - } - recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName}) - go func() { - <-ctx.Done() - for _, w := range watches { - w.Stop() - } - }() - } - - return recorder -} - -func init() { - internalclientsetscheme.AddToScheme(scheme.Scheme) -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/reconciler.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/reconciler.go deleted file mode 100644 index 0cd8697d..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/reconciler.go +++ /dev/null @@ -1,424 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. - -package trigger - -import ( - context "context" - json "encoding/json" - fmt "fmt" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - internalclientset "github.com/zeiss/typhoon/pkg/client/generated/clientset/internalclientset" - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1" - zap "go.uber.org/zap" - "go.uber.org/zap/zapcore" - v1 "k8s.io/api/core/v1" - equality "k8s.io/apimachinery/pkg/api/equality" - errors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - sets "k8s.io/apimachinery/pkg/util/sets" - record "k8s.io/client-go/tools/record" - controller "knative.dev/pkg/controller" - kmp "knative.dev/pkg/kmp" - logging "knative.dev/pkg/logging" - reconciler "knative.dev/pkg/reconciler" -) - -// Interface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1alpha1.Trigger. -type Interface interface { - // ReconcileKind implements custom logic to reconcile v1alpha1.Trigger. Any changes - // to the objects .Status or .Finalizers will be propagated to the stored - // object. It is recommended that implementors do not call any update calls - // for the Kind inside of ReconcileKind, it is the responsibility of the calling - // controller to propagate those properties. The resource passed to ReconcileKind - // will always have an empty deletion timestamp. - ReconcileKind(ctx context.Context, o *v1alpha1.Trigger) reconciler.Event -} - -// Finalizer defines the strongly typed interfaces to be implemented by a -// controller finalizing v1alpha1.Trigger. 
-type Finalizer interface { - // FinalizeKind implements custom logic to finalize v1alpha1.Trigger. Any changes - // to the objects .Status or .Finalizers will be ignored. Returning a nil or - // Normal type reconciler.Event will allow the finalizer to be deleted on - // the resource. The resource passed to FinalizeKind will always have a set - // deletion timestamp. - FinalizeKind(ctx context.Context, o *v1alpha1.Trigger) reconciler.Event -} - -// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1alpha1.Trigger if they want to process resources for which -// they are not the leader. -type ReadOnlyInterface interface { - // ObserveKind implements logic to observe v1alpha1.Trigger. - // This method should not write to the API. - ObserveKind(ctx context.Context, o *v1alpha1.Trigger) reconciler.Event -} - -type doReconcile func(ctx context.Context, o *v1alpha1.Trigger) reconciler.Event - -// reconcilerImpl implements controller.Reconciler for v1alpha1.Trigger resources. -type reconcilerImpl struct { - // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. - reconciler.LeaderAwareFuncs - - // Client is used to write back status updates. - Client internalclientset.Interface - - // Listers index properties about resources. - Lister eventingv1alpha1.TriggerLister - - // Recorder is an event recorder for recording Event resources to the - // Kubernetes API. - Recorder record.EventRecorder - - // configStore allows for decorating a context with config maps. - // +optional - configStore reconciler.ConfigStore - - // reconciler is the implementation of the business logic of the resource. - reconciler Interface - - // finalizerName is the name of the finalizer to reconcile. - finalizerName string - - // skipStatusUpdates configures whether or not this reconciler automatically updates - // the status of the reconciled resource. - skipStatusUpdates bool -} - -// Check that our Reconciler implements controller.Reconciler. -var _ controller.Reconciler = (*reconcilerImpl)(nil) - -// Check that our generated Reconciler is always LeaderAware. -var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) - -func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client internalclientset.Interface, lister eventingv1alpha1.TriggerLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { - // Check the options function input. It should be 0 or 1. - if len(options) > 1 { - logger.Fatal("Up to one options struct is supported, found: ", len(options)) - } - - // Fail fast when users inadvertently implement the other LeaderAware interface. - // For the typed reconcilers, Promote shouldn't take any arguments. - if _, ok := r.(reconciler.LeaderAware); ok { - logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) - } - - rec := &reconcilerImpl{ - LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ - PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { - all, err := lister.List(labels.Everything()) - if err != nil { - return err - } - for _, elt := range all { - // TODO: Consider letting users specify a filter in options. 
- enq(bkt, types.NamespacedName{ - Namespace: elt.GetNamespace(), - Name: elt.GetName(), - }) - } - return nil - }, - }, - Client: client, - Lister: lister, - Recorder: recorder, - reconciler: r, - finalizerName: defaultFinalizerName, - } - - for _, opts := range options { - if opts.ConfigStore != nil { - rec.configStore = opts.ConfigStore - } - if opts.FinalizerName != "" { - rec.finalizerName = opts.FinalizerName - } - if opts.SkipStatusUpdates { - rec.skipStatusUpdates = true - } - if opts.DemoteFunc != nil { - rec.DemoteFunc = opts.DemoteFunc - } - } - - return rec -} - -// Reconcile implements controller.Reconciler -func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { - logger := logging.FromContext(ctx) - - // Initialize the reconciler state. This will convert the namespace/name - // string into a distinct namespace and name, determine if this instance of - // the reconciler is the leader, and any additional interfaces implemented - // by the reconciler. Returns an error is the resource key is invalid. - s, err := newState(key, r) - if err != nil { - logger.Error("Invalid resource key: ", key) - return nil - } - - // If we are not the leader, and we don't implement either ReadOnly - // observer interfaces, then take a fast-path out. - if s.isNotLeaderNorObserver() { - return controller.NewSkipKey(key) - } - - // If configStore is set, attach the frozen configuration to the context. - if r.configStore != nil { - ctx = r.configStore.ToContext(ctx) - } - - // Add the recorder to context. - ctx = controller.WithEventRecorder(ctx, r.Recorder) - - // Get the resource with this namespace/name. - - getter := r.Lister.Triggers(s.namespace) - - original, err := getter.Get(s.name) - - if errors.IsNotFound(err) { - // The resource may no longer exist, in which case we stop processing and call - // the ObserveDeletion handler if appropriate. - logger.Debugf("Resource %q no longer exists", key) - if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { - return del.ObserveDeletion(ctx, types.NamespacedName{ - Namespace: s.namespace, - Name: s.name, - }) - } - return nil - } else if err != nil { - return err - } - - // Don't modify the informers copy. - resource := original.DeepCopy() - - var reconcileEvent reconciler.Event - - name, do := s.reconcileMethodFor(resource) - // Append the target method to the logger. - logger = logger.With(zap.String("targetMethod", name)) - switch name { - case reconciler.DoReconcileKind: - // Set and update the finalizer on resource if r.reconciler - // implements Finalizer. - if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { - return fmt.Errorf("failed to set finalizers: %w", err) - } - - if !r.skipStatusUpdates { - reconciler.PreProcessReconcile(ctx, resource) - } - - // Reconcile this copy of the resource and then write back any status - // updates regardless of whether the reconciliation errored out. - reconcileEvent = do(ctx, resource) - - if !r.skipStatusUpdates { - reconciler.PostProcessReconcile(ctx, resource, original) - } - - case reconciler.DoFinalizeKind: - // For finalizing reconcilers, if this resource being marked for deletion - // and reconciled cleanly (nil or normal event), remove the finalizer. - reconcileEvent = do(ctx, resource) - - if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { - return fmt.Errorf("failed to clear finalizers: %w", err) - } - - case reconciler.DoObserveKind: - // Observe any changes to this resource, since we are not the leader. 
- reconcileEvent = do(ctx, resource) - - } - - // Synchronize the status. - switch { - case r.skipStatusUpdates: - // This reconciler implementation is configured to skip resource updates. - // This may mean this reconciler does not observe spec, but reconciles external changes. - case equality.Semantic.DeepEqual(original.Status, resource.Status): - // If we didn't change anything then don't call updateStatus. - // This is important because the copy we loaded from the injectionInformer's - // cache may be stale and we don't want to overwrite a prior update - // to status with this stale state. - case !s.isLeader: - // High-availability reconcilers may have many replicas watching the resource, but only - // the elected leader is expected to write modifications. - logger.Warn("Saw status changes when we aren't the leader!") - default: - if err = r.updateStatus(ctx, logger, original, resource); err != nil { - logger.Warnw("Failed to update resource status", zap.Error(err)) - r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", - "Failed to update status for %q: %v", resource.Name, err) - return err - } - } - - // Report the reconciler event, if any. - if reconcileEvent != nil { - var event *reconciler.ReconcilerEvent - if reconciler.EventAs(reconcileEvent, &event) { - logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) - r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) - - // the event was wrapped inside an error, consider the reconciliation as failed - if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { - return reconcileEvent - } - return nil - } - - if controller.IsSkipKey(reconcileEvent) { - // This is a wrapped error, don't emit an event. - } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { - // This is a wrapped error, don't emit an event. - } else { - logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) - } - return reconcileEvent - } - - return nil -} - -func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1alpha1.Trigger, desired *v1alpha1.Trigger) error { - existing = existing.DeepCopy() - return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { - // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. - if attempts > 0 { - - getter := r.Client.EventingV1alpha1().Triggers(desired.Namespace) - - existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) - if err != nil { - return err - } - } - - // If there's nothing to update, just return. - if equality.Semantic.DeepEqual(existing.Status, desired.Status) { - return nil - } - - if logger.Desugar().Core().Enabled(zapcore.DebugLevel) { - if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { - logger.Debug("Updating status with: ", diff) - } - } - - existing.Status = desired.Status - - updater := r.Client.EventingV1alpha1().Triggers(existing.Namespace) - - _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) - return err - }) -} - -// updateFinalizersFiltered will update the Finalizers of the resource. -// TODO: this method could be generic and sync all finalizers. For now it only -// updates defaultFinalizerName or its override. 
-func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1alpha1.Trigger, desiredFinalizers sets.Set[string]) (*v1alpha1.Trigger, error) { - // Don't modify the informers copy. - existing := resource.DeepCopy() - - var finalizers []string - - // If there's nothing to update, just return. - existingFinalizers := sets.New[string](existing.Finalizers...) - - if desiredFinalizers.Has(r.finalizerName) { - if existingFinalizers.Has(r.finalizerName) { - // Nothing to do. - return resource, nil - } - // Add the finalizer. - finalizers = append(existing.Finalizers, r.finalizerName) - } else { - if !existingFinalizers.Has(r.finalizerName) { - // Nothing to do. - return resource, nil - } - // Remove the finalizer. - existingFinalizers.Delete(r.finalizerName) - finalizers = sets.List(existingFinalizers) - } - - mergePatch := map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": finalizers, - "resourceVersion": existing.ResourceVersion, - }, - } - - patch, err := json.Marshal(mergePatch) - if err != nil { - return resource, err - } - - patcher := r.Client.EventingV1alpha1().Triggers(resource.Namespace) - - resourceName := resource.Name - updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) - if err != nil { - r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", - "Failed to update finalizers for %q: %v", resourceName, err) - } else { - r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", - "Updated %q finalizers", resource.GetName()) - } - return updated, err -} - -func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1alpha1.Trigger) (*v1alpha1.Trigger, error) { - if _, ok := r.reconciler.(Finalizer); !ok { - return resource, nil - } - - finalizers := sets.New[string](resource.Finalizers...) - - // If this resource is not being deleted, mark the finalizer. - if resource.GetDeletionTimestamp().IsZero() { - finalizers.Insert(r.finalizerName) - } - - // Synchronize the finalizers filtered by r.finalizerName. - return r.updateFinalizersFiltered(ctx, resource, finalizers) -} - -func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1alpha1.Trigger, reconcileEvent reconciler.Event) (*v1alpha1.Trigger, error) { - if _, ok := r.reconciler.(Finalizer); !ok { - return resource, nil - } - if resource.GetDeletionTimestamp().IsZero() { - return resource, nil - } - - finalizers := sets.New[string](resource.Finalizers...) - - if reconcileEvent != nil { - var event *reconciler.ReconcilerEvent - if reconciler.EventAs(reconcileEvent, &event) { - if event.EventType == v1.EventTypeNormal { - finalizers.Delete(r.finalizerName) - } - } - } else { - finalizers.Delete(r.finalizerName) - } - - // Synchronize the finalizers filtered by r.finalizerName. - return r.updateFinalizersFiltered(ctx, resource, finalizers) -} diff --git a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/state.go b/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/state.go deleted file mode 100644 index 78fded8a..00000000 --- a/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger/state.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by injection-gen. DO NOT EDIT. 
- -package trigger - -import ( - fmt "fmt" - - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - types "k8s.io/apimachinery/pkg/types" - cache "k8s.io/client-go/tools/cache" - reconciler "knative.dev/pkg/reconciler" -) - -// state is used to track the state of a reconciler in a single run. -type state struct { - // key is the original reconciliation key from the queue. - key string - // namespace is the namespace split from the reconciliation key. - namespace string - // name is the name split from the reconciliation key. - name string - // reconciler is the reconciler. - reconciler Interface - // roi is the read only interface cast of the reconciler. - roi ReadOnlyInterface - // isROI (Read Only Interface) the reconciler only observes reconciliation. - isROI bool - // isLeader the instance of the reconciler is the elected leader. - isLeader bool -} - -func newState(key string, r *reconcilerImpl) (*state, error) { - // Convert the namespace/name string into a distinct namespace and name. - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return nil, fmt.Errorf("invalid resource key: %s", key) - } - - roi, isROI := r.reconciler.(ReadOnlyInterface) - - isLeader := r.IsLeaderFor(types.NamespacedName{ - Namespace: namespace, - Name: name, - }) - - return &state{ - key: key, - namespace: namespace, - name: name, - reconciler: r.reconciler, - roi: roi, - isROI: isROI, - isLeader: isLeader, - }, nil -} - -// isNotLeaderNorObserver checks to see if this reconciler with the current -// state is enabled to do any work or not. -// isNotLeaderNorObserver returns true when there is no work possible for the -// reconciler. -func (s *state) isNotLeaderNorObserver() bool { - if !s.isLeader && !s.isROI { - // If we are not the leader, and we don't implement the ReadOnly - // interface, then take a fast-path out. - return true - } - return false -} - -func (s *state) reconcileMethodFor(o *v1alpha1.Trigger) (string, doReconcile) { - if o.GetDeletionTimestamp().IsZero() { - if s.isLeader { - return reconciler.DoReconcileKind, s.reconciler.ReconcileKind - } else if s.isROI { - return reconciler.DoObserveKind, s.roi.ObserveKind - } - } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok { - return reconciler.DoFinalizeKind, fin.FinalizeKind - } - return "unknown", nil -} diff --git a/pkg/client/generated/listers/eventing/v1alpha1/expansion_generated.go b/pkg/client/generated/listers/eventing/v1alpha1/expansion_generated.go deleted file mode 100644 index d46694fa..00000000 --- a/pkg/client/generated/listers/eventing/v1alpha1/expansion_generated.go +++ /dev/null @@ -1,19 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -// RedisBrokerListerExpansion allows custom methods to be added to -// RedisBrokerLister. -type RedisBrokerListerExpansion interface{} - -// RedisBrokerNamespaceListerExpansion allows custom methods to be added to -// RedisBrokerNamespaceLister. -type RedisBrokerNamespaceListerExpansion interface{} - -// TriggerListerExpansion allows custom methods to be added to -// TriggerLister. -type TriggerListerExpansion interface{} - -// TriggerNamespaceListerExpansion allows custom methods to be added to -// TriggerNamespaceLister. 
-type TriggerNamespaceListerExpansion interface{} diff --git a/pkg/client/generated/listers/eventing/v1alpha1/redisbroker.go b/pkg/client/generated/listers/eventing/v1alpha1/redisbroker.go deleted file mode 100644 index 64957255..00000000 --- a/pkg/client/generated/listers/eventing/v1alpha1/redisbroker.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// RedisBrokerLister helps list RedisBrokers. -// All objects returned here must be treated as read-only. -type RedisBrokerLister interface { - // List lists all RedisBrokers in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RedisBroker, err error) - // RedisBrokers returns an object that can list and get RedisBrokers. - RedisBrokers(namespace string) RedisBrokerNamespaceLister - RedisBrokerListerExpansion -} - -// redisBrokerLister implements the RedisBrokerLister interface. -type redisBrokerLister struct { - indexer cache.Indexer -} - -// NewRedisBrokerLister returns a new RedisBrokerLister. -func NewRedisBrokerLister(indexer cache.Indexer) RedisBrokerLister { - return &redisBrokerLister{indexer: indexer} -} - -// List lists all RedisBrokers in the indexer. -func (s *redisBrokerLister) List(selector labels.Selector) (ret []*v1alpha1.RedisBroker, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RedisBroker)) - }) - return ret, err -} - -// RedisBrokers returns an object that can list and get RedisBrokers. -func (s *redisBrokerLister) RedisBrokers(namespace string) RedisBrokerNamespaceLister { - return redisBrokerNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// RedisBrokerNamespaceLister helps list and get RedisBrokers. -// All objects returned here must be treated as read-only. -type RedisBrokerNamespaceLister interface { - // List lists all RedisBrokers in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.RedisBroker, err error) - // Get retrieves the RedisBroker from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.RedisBroker, error) - RedisBrokerNamespaceListerExpansion -} - -// redisBrokerNamespaceLister implements the RedisBrokerNamespaceLister -// interface. -type redisBrokerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all RedisBrokers in the indexer for a given namespace. -func (s redisBrokerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RedisBroker, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.RedisBroker)) - }) - return ret, err -} - -// Get retrieves the RedisBroker from the indexer for a given namespace and name. 
-func (s redisBrokerNamespaceLister) Get(name string) (*v1alpha1.RedisBroker, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("redisbroker"), name) - } - return obj.(*v1alpha1.RedisBroker), nil -} diff --git a/pkg/client/generated/listers/eventing/v1alpha1/trigger.go b/pkg/client/generated/listers/eventing/v1alpha1/trigger.go deleted file mode 100644 index dd993886..00000000 --- a/pkg/client/generated/listers/eventing/v1alpha1/trigger.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// TriggerLister helps list Triggers. -// All objects returned here must be treated as read-only. -type TriggerLister interface { - // List lists all Triggers in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Trigger, err error) - // Triggers returns an object that can list and get Triggers. - Triggers(namespace string) TriggerNamespaceLister - TriggerListerExpansion -} - -// triggerLister implements the TriggerLister interface. -type triggerLister struct { - indexer cache.Indexer -} - -// NewTriggerLister returns a new TriggerLister. -func NewTriggerLister(indexer cache.Indexer) TriggerLister { - return &triggerLister{indexer: indexer} -} - -// List lists all Triggers in the indexer. -func (s *triggerLister) List(selector labels.Selector) (ret []*v1alpha1.Trigger, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Trigger)) - }) - return ret, err -} - -// Triggers returns an object that can list and get Triggers. -func (s *triggerLister) Triggers(namespace string) TriggerNamespaceLister { - return triggerNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// TriggerNamespaceLister helps list and get Triggers. -// All objects returned here must be treated as read-only. -type TriggerNamespaceLister interface { - // List lists all Triggers in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Trigger, err error) - // Get retrieves the Trigger from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Trigger, error) - TriggerNamespaceListerExpansion -} - -// triggerNamespaceLister implements the TriggerNamespaceLister -// interface. -type triggerNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Triggers in the indexer for a given namespace. -func (s triggerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Trigger, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Trigger)) - }) - return ret, err -} - -// Get retrieves the Trigger from the indexer for a given namespace and name. 
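// The generated listers above only read from the shared informer cache. A
// hedged sketch of how a reconciler typically consumes them, mirroring the
// call shape used elsewhere in this diff; the function name is invented:
package example

import (
	"k8s.io/apimachinery/pkg/labels"

	eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1"
	eventingv1alpha1listers "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1"
)

// triggersInNamespace reads every Trigger in one namespace from the informer
// cache; like all lister results, the returned objects must be treated as
// read-only.
func triggersInNamespace(lister eventingv1alpha1listers.TriggerLister, namespace string) ([]*eventingv1alpha1.Trigger, error) {
	return lister.Triggers(namespace).List(labels.Everything())
}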
-func (s triggerNamespaceLister) Get(name string) (*v1alpha1.Trigger, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("trigger"), name) - } - return obj.(*v1alpha1.Trigger), nil -} diff --git a/pkg/reconciler/common/common.go b/pkg/reconciler/common/common.go deleted file mode 100644 index 6fdb363c..00000000 --- a/pkg/reconciler/common/common.go +++ /dev/null @@ -1,28 +0,0 @@ -package common - -import ( - "strings" - - "knative.dev/pkg/kmeta" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" -) - -const ( - configMapResourceSuffix = "status" - - // Name of the status key inside the Status ConfigMap - ConfigMapStatusKey = "status" -) - -func AppAnnotationValue(or kmeta.OwnerRefable) string { - return strings.ToLower(or.GetGroupVersionKind().Kind) -} - -func GetBrokerConfigMapName(b eventingv1alpha1.ReconcilableBroker) string { - if b == nil { - return "" - } - - return b.GetObjectMeta().GetName() + "-" + b.GetOwnedObjectsSuffix() + "-" + configMapResourceSuffix -} diff --git a/pkg/reconciler/common/events.go b/pkg/reconciler/common/events.go deleted file mode 100644 index 1b518f85..00000000 --- a/pkg/reconciler/common/events.go +++ /dev/null @@ -1,47 +0,0 @@ -package common - -// Reasons for API Events -const ( - ReasonDeploymentCreate = "CreateDeployment" - ReasonDeploymentUpdate = "UpdateDeployment" - ReasonFailedDeploymentGet = "FailedDeploymentGet" - ReasonFailedDeploymentCreate = "FailedDeploymentCreate" - ReasonFailedDeploymentUpdate = "FailedDeploymentUpdate" - - ReasonFailedSecretCompose = "FailedSecretCompose" - ReasonFailedSecretGet = "FailedSecretGet" - ReasonFailedSecretCreate = "FailedSecretCreate" - ReasonFailedSecretUpdate = "FailedSecretUpdate" - - ReasonStatusConfigMapGetFailed = "FailedConfigMapGet" - ReasonStatusConfigMapDoesNotExist = "FailedConfigMapDoesNotExist" - ReasonStatusConfigMapCreateFailed = "FailedConfigMapCreate" - ReasonStatusConfigMapReadFailed = "FailedConfigMapRead" - ReasonStatusSubscriptionFailed = "SubscriptionFailed" - ReasonStatusSubscriptionCompleted = "SubscriptionCompleted" - ReasonStatusSubscriptionUnknown = "SubscriptionUnknown" - ReasonStatusSubscriptionReady = "SubscriptionReady" - ReasonStatusSubscriptionRunning = "SubscriptionRunning" - - ReasonFailedServiceAccountGet = "FailedServiceAccountGet" - ReasonFailedServiceAccountCreate = "FailedServiceAccountCreate" - ReasonFailedRoleBindingGet = "FailedRoleBindingGet" - ReasonFailedRoleBindingCreate = "FailedRoleBindingCreate" - - ReasonServiceCreate = "CreateService" - ReasonServiceUpdate = "UpdateService" - ReasonFailedServiceGet = "FailedServiceGet" - ReasonFailedServiceCreate = "FailedServiceCreate" - ReasonFailedServiceUpdate = "FailedServiceUpdate" - - ReasonFailedTriggerList = "FailedTriggerList" - ReasonFailedConfigSerialize = "FailedConfigSerialize" - - ReasonUnavailableEndpoints = "UnavailableEndpoints" - ReasonFailedEndpointsGet = "FailedEndpointsGet" - - ReasonBrokerDoesNotExist = "BrokerDoesNotExist" - ReasonFailedBrokerGet = "FailedBrokerGet" - - ReasonFailedResolveReference = "FailedResolveReference" -) diff --git a/pkg/reconciler/common/reconcile_broker.go b/pkg/reconciler/common/reconcile_broker.go deleted file mode 100644 index 8afcd2e0..00000000 --- a/pkg/reconciler/common/reconcile_broker.go +++ /dev/null @@ -1,302 +0,0 @@ -package common - -import ( - "context" - "strconv" - - "go.uber.org/zap" - - appsv1 
"k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - appsv1listers "k8s.io/client-go/listers/apps/v1" - corev1listers "k8s.io/client-go/listers/core/v1" - "knative.dev/eventing/pkg/apis/duck" - k8sclient "knative.dev/pkg/client/injection/kube/client" - "knative.dev/pkg/logging" - pkgreconciler "knative.dev/pkg/reconciler" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/resource" - "github.com/zeiss/typhoon/pkg/reconciler/semantic" -) - -const ( - brokerResourceSuffix = "broker" - brokerDeploymentComponentLabel = "broker-deployment" - - // container ports must be >1024 to be able to bind them - // in unprivileged environments. - brokerContainerPort = 8080 - - defaultBrokerServicePort = 80 - metricsServicePort = 9090 -) - -type BrokerReconciler interface { - Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker, sa *corev1.ServiceAccount, secret *corev1.Secret, configMap *corev1.ConfigMap, do ...resource.ObjectOption) (*appsv1.Deployment, *corev1.Service, error) -} - -type brokerReconciler struct { - client kubernetes.Interface - deploymentLister appsv1listers.DeploymentLister - serviceLister corev1listers.ServiceLister - endpointsLister corev1listers.EndpointsLister - image string - // TODO remove when using releases - pullPolicy corev1.PullPolicy -} - -func NewBrokerReconciler(ctx context.Context, - deploymentLister appsv1listers.DeploymentLister, - serviceLister corev1listers.ServiceLister, - endpointsLister corev1listers.EndpointsLister, - image string, - pullPolicy corev1.PullPolicy, -) BrokerReconciler { - return &brokerReconciler{ - client: k8sclient.Get(ctx), - deploymentLister: deploymentLister, - serviceLister: serviceLister, - endpointsLister: endpointsLister, - image: image, - pullPolicy: pullPolicy, - } -} - -func (r *brokerReconciler) Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker, sa *corev1.ServiceAccount, secret *corev1.Secret, configMap *corev1.ConfigMap, deploymentOptions ...resource.ObjectOption) (*appsv1.Deployment, *corev1.Service, error) { - d, err := r.reconcileDeployment(ctx, rb, sa, secret, configMap, deploymentOptions) - if err != nil { - return nil, nil, err - } - - svc, err := r.reconcileService(ctx, rb) - if err != nil { - return d, nil, err - } - - _, err = r.reconcileEndpoints(ctx, svc, rb) - if err != nil { - return d, nil, err - } - - return d, svc, nil -} - -func buildBrokerDeployment(rb eventingv1alpha1.ReconcilableBroker, sa *corev1.ServiceAccount, secret *corev1.Secret, cm *corev1.ConfigMap, image string, pullPolicy corev1.PullPolicy, extraOptions ...resource.ObjectOption) *appsv1.Deployment { - meta := rb.GetObjectMeta() - ns, name := meta.GetNamespace(), meta.GetName() - bs := rb.GetReconcilableBrokerSpec() - - copts := []resource.ObjectOption{ - resource.ContainerAddArgs("start"), - resource.ContainerAddEnvFromValue("PORT", strconv.Itoa(int(brokerContainerPort))), - resource.ContainerAddEnvFromFieldRef("BROKER_NAME", "metadata.name"), - resource.ContainerAddEnvFromFieldRef("KUBERNETES_NAMESPACE", "metadata.namespace"), - resource.ContainerAddEnvFromValue("KUBERNETES_BROKER_CONFIG_SECRET_NAME", secret.Name), - resource.ContainerAddEnvFromValue("KUBERNETES_BROKER_CONFIG_SECRET_KEY", ConfigSecretKey), - resource.ContainerAddEnvFromValue("KUBERNETES_STATUS_CONFIGMAP_NAME", cm.Name), - 
resource.ContainerWithImagePullPolicy(pullPolicy), - resource.ContainerAddPort("httpce", brokerContainerPort), - resource.ContainerAddPort("metrics", metricsServicePort), - } - - if bs.Observability != nil && bs.Observability.ValueFromConfigMap != "" { - copts = append(copts, resource.ContainerAddEnvFromValue("KUBERNETES_OBSERVABILITY_CONFIGMAP_NAME", bs.Observability.ValueFromConfigMap)) - } - - dn := name + "-" + rb.GetOwnedObjectsSuffix() + "-" + brokerResourceSuffix - d := resource.NewDeployment(ns, dn, - resource.DeploymentWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, brokerDeploymentComponentLabel), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, dn), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind())), - resource.DeploymentAddSelectorForTemplate(resource.AppComponentLabel, brokerDeploymentComponentLabel), - resource.DeploymentAddSelectorForTemplate(resource.AppInstanceLabel, dn), - resource.DeploymentSetReplicas(1), - resource.DeploymentWithTemplateSpecOptions( - // Needed for prometheus PodMonitor. - resource.PodTemplateSpecWithMetaOptions( - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - ), - resource.PodTemplateSpecWithPodSpecOptions( - resource.PodSpecWithServiceAccountName(sa.Name), - resource.PodSpecAddContainer( - resource.NewContainer("broker", image, copts...))))) - - if len(extraOptions) != 0 { - for _, o := range extraOptions { - o(d) - } - } - - return d -} - -func (r *brokerReconciler) reconcileDeployment( - ctx context.Context, - rb eventingv1alpha1.ReconcilableBroker, - sa *corev1.ServiceAccount, - secret *corev1.Secret, - configMap *corev1.ConfigMap, - deploymentOptions []resource.ObjectOption, -) (*appsv1.Deployment, error) { - desired := buildBrokerDeployment(rb, sa, secret, configMap, r.image, r.pullPolicy, deploymentOptions...) - current, err := r.deploymentLister.Deployments(desired.Namespace).Get(desired.Name) - - switch { - case err == nil: - // Compare current object with desired, update if needed. - if !semantic.Semantic.DeepEqual(desired, current) { - desired.Status = current.Status - desired.ResourceVersion = current.ResourceVersion - - current, err = r.client.AppsV1().Deployments(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to update broker deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerDeploymentFailed(ReasonFailedDeploymentUpdate, "Failed to update broker deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedDeploymentUpdate, - "Failed to get broker deployment %s: %w", fullname, err) - } - } - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current deployment. 
- fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get broker deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerDeploymentFailed(ReasonFailedDeploymentGet, "Failed to get broker deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedDeploymentGet, - "Failed to get broker deployment %s: %w", fullname, err) - - default: - // The deployment has not been found, create it. - current, err = r.client.AppsV1().Deployments(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create broker deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerDeploymentFailed(ReasonFailedDeploymentCreate, "Failed to create broker deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedDeploymentCreate, - "Failed to create broker deployment %s: %w", fullname, err) - } - } - - // Update status based on deployment - rb.GetReconcilableBrokerStatus().PropagateBrokerDeploymentAvailability(ctx, ¤t.Status) - - return current, nil -} - -func buildBrokerService(rb eventingv1alpha1.ReconcilableBroker) *corev1.Service { - meta := rb.GetObjectMeta() - ns, name := meta.GetNamespace(), meta.GetName() - bs := rb.GetReconcilableBrokerSpec() - - brokerPort := defaultBrokerServicePort - if bs.Port != nil { - brokerPort = *bs.Port - } - - sn := name + "-" + rb.GetOwnedObjectsSuffix() + "-" + brokerResourceSuffix - return resource.NewService(ns, sn, - resource.ServiceWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "broker-service"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, sn), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind())), - resource.ServiceSetType(corev1.ServiceTypeClusterIP), - resource.ServiceAddSelectorLabel(resource.AppComponentLabel, brokerDeploymentComponentLabel), - resource.ServiceAddSelectorLabel(resource.AppInstanceLabel, sn), - resource.ServiceAddPort("httpce", int32(brokerPort), brokerContainerPort)) -} - -func (r *brokerReconciler) reconcileService(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.Service, error) { - desired := buildBrokerService(rb) - current, err := r.serviceLister.Services(desired.Namespace).Get(desired.Name) - - switch { - case err == nil: - // Set Status - // Compare current object with desired, update if needed. 
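// The deployment and service reconcilers in this file all follow the same
// lister-first flow: read the cached object, compare, and only then call the
// API server. A condensed sketch of that flow; it substitutes apimachinery's
// equality.Semantic for this repository's semantic package and skips the
// status marking and event recording:
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	appsv1listers "k8s.io/client-go/listers/apps/v1"
)

func createOrUpdateDeployment(ctx context.Context, client kubernetes.Interface, lister appsv1listers.DeploymentLister, desired *appsv1.Deployment) (*appsv1.Deployment, error) {
	current, err := lister.Deployments(desired.Namespace).Get(desired.Name)
	switch {
	case err == nil:
		if equality.Semantic.DeepEqual(desired.Spec, current.Spec) {
			return current, nil // already up to date
		}
		// Carry over the field the API server owns before updating.
		desired.ResourceVersion = current.ResourceVersion
		return client.AppsV1().Deployments(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{})
	case apierrs.IsNotFound(err):
		return client.AppsV1().Deployments(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})
	default:
		return nil, err
	}
}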
- if !semantic.Semantic.DeepEqual(desired, current) { - desired.Status = current.Status - desired.ResourceVersion = current.ResourceVersion - - current, err = r.client.CoreV1().Services(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to update broker service", zap.String("service", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerServiceFailed(ReasonFailedServiceUpdate, "Failed to update broker service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedServiceUpdate, - "Failed to get broker service %s: %w", fullname, err) - } - } - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current object. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get the service", zap.String("service", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerServiceFailed(ReasonFailedServiceGet, "Failed to get broker service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedServiceGet, - "Failed to get broker service %s: %w", fullname, err) - - default: - // The object has not been found, create it. - current, err = r.client.CoreV1().Services(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create the service", zap.String("service", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerServiceFailed(ReasonFailedServiceCreate, "Failed to create broker service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedServiceCreate, - "Failed to create broker service %s: %w", fullname, err) - } - } - - // Service exists and is up to date. 
- rb.GetReconcilableBrokerStatus().MarkBrokerServiceReady() - - return current, nil -} - -func (r *brokerReconciler) reconcileEndpoints(ctx context.Context, service *corev1.Service, rb eventingv1alpha1.ReconcilableBroker) (*corev1.Endpoints, error) { - ep, err := r.endpointsLister.Endpoints(service.Namespace).Get(service.Name) - switch { - case err == nil: - if duck.EndpointsAreAvailable(ep) { - rb.GetReconcilableBrokerStatus().MarkBrokerEndpointsTrue() - return ep, nil - } - - rb.GetReconcilableBrokerStatus().MarkBrokerEndpointsFailed(ReasonUnavailableEndpoints, "Endpoints for broker service are not available") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonUnavailableEndpoints, - "Endpoints for broker service %q are not available", - types.NamespacedName{Namespace: ep.Namespace, Name: ep.Name}) - - case apierrs.IsNotFound(err): - rb.GetReconcilableBrokerStatus().MarkBrokerEndpointsFailed(ReasonUnavailableEndpoints, "Endpoints for broker service do not exist") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonUnavailableEndpoints, - "Endpoints for broker service %q do not exist", - types.NamespacedName{Namespace: service.Namespace, Name: service.Name}) - } - - fullname := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - rb.GetReconcilableBrokerStatus().MarkBrokerEndpointsUnknown(ReasonFailedEndpointsGet, "Could not retrieve endpoints for broker service") - logging.FromContext(ctx).Error("Unable to get the broker service endpoints", zap.String("endpoint", fullname.String()), zap.Error(err)) - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedEndpointsGet, - "Failed to get broker service ednpoints %s: %w", fullname, err) -} diff --git a/pkg/reconciler/common/reconcile_configmap.go b/pkg/reconciler/common/reconcile_configmap.go deleted file mode 100644 index f05d2e01..00000000 --- a/pkg/reconciler/common/reconcile_configmap.go +++ /dev/null @@ -1,75 +0,0 @@ -package common - -import ( - "context" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/resource" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - corev1listers "k8s.io/client-go/listers/core/v1" - k8sclient "knative.dev/pkg/client/injection/kube/client" - pkgreconciler "knative.dev/pkg/reconciler" - // import the other required packages -) - -type ConfigMapReconciler interface { - Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.ConfigMap, error) -} - -type configMapReconciler struct { - client kubernetes.Interface - configMapLister corev1listers.ConfigMapLister -} - -var _ ConfigMapReconciler = (*configMapReconciler)(nil) - -func NewConfigMapReconciler(ctx context.Context, configMapLister corev1listers.ConfigMapLister) ConfigMapReconciler { - return &configMapReconciler{ - client: k8sclient.Get(ctx), - configMapLister: configMapLister, - } -} - -func (r *configMapReconciler) Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.ConfigMap, error) { - meta := rb.GetObjectMeta() - ns := meta.GetNamespace() - - configMapName := GetBrokerConfigMapName(rb) - - desired := resource.NewConfigMap(ns, configMapName, - resource.ConfigMapWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "broker-status"), - 
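// reconcileEndpoints above gates broker readiness on the Service's Endpoints.
// A trimmed, stand-alone version of that check, with the status marking and
// event emission left out:
package example

import (
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"knative.dev/eventing/pkg/apis/duck"
)

// serviceIsRoutable reports whether the named Service has an Endpoints object
// with at least one ready address.
func serviceIsRoutable(lister corev1listers.EndpointsLister, namespace, name string) (bool, error) {
	ep, err := lister.Endpoints(namespace).Get(name)
	switch {
	case apierrs.IsNotFound(err):
		return false, nil
	case err != nil:
		return false, err
	}
	return duck.EndpointsAreAvailable(ep), nil
}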
resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, configMapName), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind())), - ) - - _, err := r.configMapLister.ConfigMaps(desired.Namespace).Get(desired.Name) - switch { - case err == nil: - // We only require the ConfigMap to exist, no action needed. - - case apierrs.IsNotFound(err): - // The configMap has not been found, create it. - _, err = r.client.CoreV1().ConfigMaps(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - rb.GetReconcilableBrokerStatus().MarkStatusConfigFailed(ReasonStatusConfigMapCreateFailed, "Failed to create configMap for status reporting") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonStatusConfigMapCreateFailed, - "Failed to create configMap for status reporting %s: %w", desired.Name, err) - } - - default: - rb.GetReconcilableBrokerStatus().MarkStatusConfigFailed(ReasonStatusConfigMapGetFailed, "Failed to get configMap for status reporting") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonStatusConfigMapGetFailed, - "Failed to get configMap for status reporting %s: %w", desired.Name, err) - } - - rb.GetReconcilableBrokerStatus().MarkStatusConfigReady() - - return desired, nil -} diff --git a/pkg/reconciler/common/reconcile_secret.go b/pkg/reconciler/common/reconcile_secret.go deleted file mode 100644 index 8a90a8ca..00000000 --- a/pkg/reconciler/common/reconcile_secret.go +++ /dev/null @@ -1,212 +0,0 @@ -package common - -import ( - "context" - - "go.uber.org/zap" - "sigs.k8s.io/yaml" - - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - corev1listers "k8s.io/client-go/listers/core/v1" - duckv1 "knative.dev/eventing/pkg/apis/duck/v1" - k8sclient "knative.dev/pkg/client/injection/kube/client" - "knative.dev/pkg/logging" - pkgreconciler "knative.dev/pkg/reconciler" - - "github.com/zeiss/typhoon/pkg/brokers/config/broker" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - eventingv1alpha1listers "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/resource" - "github.com/zeiss/typhoon/pkg/reconciler/semantic" -) - -const ( - ConfigSecretKey = "config" - secretResourceSuffix = "config" -) - -type SecretReconciler interface { - Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.Secret, error) -} - -type secretReconciler struct { - client kubernetes.Interface - secretLister corev1listers.SecretLister - triggerLister eventingv1alpha1listers.TriggerLister -} - -var _ SecretReconciler = (*secretReconciler)(nil) - -func NewSecretReconciler(ctx context.Context, secretLister corev1listers.SecretLister, triggerLister eventingv1alpha1listers.TriggerLister) SecretReconciler { - return &secretReconciler{ - client: k8sclient.Get(ctx), - secretLister: secretLister, - triggerLister: triggerLister, - } -} - -func (r *secretReconciler) Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.Secret, error) { - desired, err := r.buildConfigSecret(ctx, rb) - if err != nil { - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedSecretCompose, "Failed to compose secret config from broker") - return nil, 
err - } - - current, err := r.secretLister.Secrets(desired.Namespace).Get(desired.Name) - switch { - case err == nil: - // Compare current object with desired, update if needed. - if !semantic.Semantic.DeepEqual(desired, current) { - desired.ResourceVersion = current.ResourceVersion - - current, err = r.client.CoreV1().Secrets(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to update secret", zap.String("secret", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedSecretUpdate, "Failed to update config from secret") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedSecretUpdate, - "Failed to update config from secret %s: %w", fullname, err) - } - } - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current secret. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get secret", zap.String("secret", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedSecretGet, "Failed to get config from secret") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedSecretGet, - "Failed to get config from secret %s: %w", fullname, err) - - default: - // The secret has not been found, create it. - current, err = r.client.CoreV1().Secrets(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create secret", zap.String("secret", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedSecretCreate, "Failed to create secret for config") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedSecretCreate, - "Failed to create secret for config %s: %w", fullname, err) - } - } - - rb.GetReconcilableBrokerStatus().MarkConfigSecretReady() - - return current, nil -} - -func (r *secretReconciler) buildConfigSecret(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.Secret, error) { - meta := rb.GetObjectMeta() - ns, name := meta.GetNamespace(), meta.GetName() - - triggers, err := r.triggerLister.Triggers(ns).List(labels.Everything()) - if err != nil { - logging.FromContext(ctx).Error("Unable to list triggers at namespace", zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedTriggerList, "Failed to list triggers") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedTriggerList, - "Failed to list triggers: %w", err) - } - - cfg := &broker.Config{ - Triggers: make(map[string]broker.Trigger), - } - for _, t := range triggers { - // Generate secret even if the trigger is not ready, as long as one of the URIs for target - // or DLS exist. - if !t.OwnerRefableMatchesBroker(rb) || (t.Status.TargetURI == nil && t.Status.DeadLetterSinkURI == nil) { - continue - } - - targetURI := "" - if t.Status.TargetURI != nil { - targetURI = t.Status.TargetURI.String() - } else { - // Configure empty URL so that all requests go to DLS when the target is - // not ready. 
- targetURI = "" - } - - do := &broker.DeliveryOptions{} - if t.Spec.Delivery != nil { - do.Retry = t.Spec.Delivery.Retry - do.BackoffDelay = t.Spec.Delivery.BackoffDelay - - if t.Spec.Delivery.BackoffPolicy != nil { - var bop broker.BackoffPolicyType - switch *t.Spec.Delivery.BackoffPolicy { - case duckv1.BackoffPolicyLinear: - bop = broker.BackoffPolicyLinear - - case duckv1.BackoffPolicyExponential: - bop = broker.BackoffPolicyLinear - } - do.BackoffPolicy = &bop - } - - if t.Status.DeadLetterSinkURI != nil { - uri := t.Status.DeadLetterSinkURI.String() - do.DeadLetterURL = &uri - } - } - - trg := broker.Trigger{ - Filters: t.Spec.Filters, - Target: broker.Target{ - URL: &targetURI, - DeliveryOptions: do, - }, - } - - if t.Spec.Bounds != nil { - trg.Bounds = &broker.TriggerBounds{} - if t.Spec.Bounds.ByDate != nil { - trg.Bounds.ByDate = &broker.Bounds{ - Start: t.Spec.Bounds.ByDate.Start, - End: t.Spec.Bounds.ByDate.End, - } - } - - if t.Spec.Bounds.ById != nil { - trg.Bounds.ByID = &broker.Bounds{ - Start: t.Spec.Bounds.ById.Start, - End: t.Spec.Bounds.ById.End, - } - } - } - - // Add Trigger data to config - cfg.Triggers[t.Name] = trg - } - - // TODO add user/password - - b, err := yaml.Marshal(cfg) - if err != nil { - logging.FromContext(ctx).Error("Unable to marshal configuration into YAML", zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkConfigSecretFailed(ReasonFailedConfigSerialize, "Failed to serialize configuration") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedConfigSerialize, - "Failed to serialize configuration: %w", err) - } - - sn := name + "-" + rb.GetOwnedObjectsSuffix() + "-" + secretResourceSuffix - - return resource.NewSecret(ns, sn, - resource.SecretWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "broker-config"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, sn), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind())), - resource.SecretSetData(ConfigSecretKey, b)), nil -} diff --git a/pkg/reconciler/common/reconcile_serciveaccount.go b/pkg/reconciler/common/reconcile_serciveaccount.go deleted file mode 100644 index acc7823b..00000000 --- a/pkg/reconciler/common/reconcile_serciveaccount.go +++ /dev/null @@ -1,163 +0,0 @@ -package common - -import ( - "context" - - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - corev1listers "k8s.io/client-go/listers/core/v1" - rbacv1listers "k8s.io/client-go/listers/rbac/v1" - k8sclient "knative.dev/pkg/client/injection/kube/client" - "knative.dev/pkg/logging" - pkgreconciler "knative.dev/pkg/reconciler" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/resource" -) - -const ( - // Broker ClusterRole that was created as part of Typhoon core installation. 
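// buildConfigSecret above renders the broker's trigger routing table to YAML
// and stores it under the "config" key of a Secret. A self-contained
// illustration of that serialization step; the types here are stand-ins, not
// the real github.com/zeiss/typhoon/pkg/brokers/config/broker types:
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

type target struct {
	URL string `json:"url"`
}

type trigger struct {
	Target target `json:"target"`
}

type config struct {
	Triggers map[string]trigger `json:"triggers"`
}

func main() {
	cfg := config{Triggers: map[string]trigger{
		// Hypothetical trigger pointing at an addressable sink.
		"display": {Target: target{URL: "http://event-display.default.svc.cluster.local"}},
	}}

	// sigs.k8s.io/yaml marshals through the JSON tags, which is why
	// JSON-tagged config types can be reused for the Secret payload.
	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}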
- BrokerDeploymentRole = "typhoon-broker" - serviceAccountResourceSuffix = "broker" - roleBindingResourceSuffix = "broker" -) - -type ServiceAccountReconciler interface { - Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.ServiceAccount, *rbacv1.RoleBinding, error) -} - -type serviceAccountReconciler struct { - client kubernetes.Interface - serviceAccountLister corev1listers.ServiceAccountLister - roleBindingLister rbacv1listers.RoleBindingLister -} - -var _ ServiceAccountReconciler = (*serviceAccountReconciler)(nil) - -func NewServiceAccountReconciler(ctx context.Context, serviceAccountLister corev1listers.ServiceAccountLister, roleBindingLister rbacv1listers.RoleBindingLister) ServiceAccountReconciler { - return &serviceAccountReconciler{ - client: k8sclient.Get(ctx), - serviceAccountLister: serviceAccountLister, - roleBindingLister: roleBindingLister, - } -} - -func (r *serviceAccountReconciler) Reconcile(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.ServiceAccount, *rbacv1.RoleBinding, error) { - sa, err := r.reconcileServiceAccount(ctx, rb) - if err != nil { - return nil, nil, err - } - - roleb, err := r.reconcileRoleBinding(ctx, rb, sa) - if err != nil { - return nil, nil, err - } - - return sa, roleb, nil -} - -func buildBrokerServiceAccount(rb eventingv1alpha1.ReconcilableBroker) *corev1.ServiceAccount { - meta := rb.GetObjectMeta() - ns, name := meta.GetNamespace(), meta.GetName()+"-"+rb.GetOwnedObjectsSuffix()+"-"+serviceAccountResourceSuffix - - return resource.NewServiceAccount(ns, name, - resource.ServiceAccountWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "broker-serviceaccount"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, name), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind()))) -} - -func (r *serviceAccountReconciler) reconcileServiceAccount(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker) (*corev1.ServiceAccount, error) { - desired := buildBrokerServiceAccount(rb) - current, err := r.serviceAccountLister.ServiceAccounts(desired.Namespace).Get(desired.Name) - - switch { - case err == nil: - // TODO compare - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current object. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get broker ServiceAccount", zap.String("serviceAccount", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerServiceAccountFailed(ReasonFailedServiceAccountGet, "Failed to get broker ServiceAccount") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedServiceAccountGet, - "Failed to get broker ServiceAccount %s: %w", fullname, err) - - default: - // The ServiceAccount has not been found, create it. 
- current, err = r.client.CoreV1().ServiceAccounts(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create broker ServiceAccount", zap.String("serviceAccount", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerServiceAccountFailed(ReasonFailedServiceAccountCreate, "Failed to create broker ServiceAccount") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedServiceAccountCreate, - "Failed to create broker ServiceAccount %s: %w", fullname, err) - } - } - - // Update status - rb.GetReconcilableBrokerStatus().MarkBrokerServiceAccountReady() - - return current, nil -} - -func buildBrokerRoleBinding(rb eventingv1alpha1.ReconcilableBroker, sa *corev1.ServiceAccount) *rbacv1.RoleBinding { - meta := rb.GetObjectMeta() - ns, name := meta.GetNamespace(), meta.GetName()+"-"+rb.GetOwnedObjectsSuffix()+"-"+roleBindingResourceSuffix - - return resource.NewRoleBinding(ns, name, BrokerDeploymentRole, sa.Name, - resource.RoleBindingWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "broker-rolebinding"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, name), - resource.MetaAddOwner(meta, rb.GetGroupVersionKind()))) -} - -func (r *serviceAccountReconciler) reconcileRoleBinding(ctx context.Context, rb eventingv1alpha1.ReconcilableBroker, sa *corev1.ServiceAccount) (*rbacv1.RoleBinding, error) { - desired := buildBrokerRoleBinding(rb, sa) - current, err := r.roleBindingLister.RoleBindings(desired.Namespace).Get(desired.Name) - - switch { - case err == nil: - // TODO compare - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current object. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get broker RoleBinding", zap.String("roleBinding", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerRoleBindingFailed(ReasonFailedRoleBindingGet, "Failed to get broker RoleBinding") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedRoleBindingGet, - "Failed to get broker RoleBinding %s: %w", fullname, err) - - default: - // The RoleBinding has not been found, create it. 
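// The RoleBinding built above binds the broker's ServiceAccount to the
// cluster-scoped "typhoon-broker" role mentioned in the constant block. The
// resource.NewRoleBinding helper itself is not part of this diff, so the exact
// object shape below is an assumption sketched with plain rbac/v1 types:
package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func brokerRoleBinding(namespace, name, serviceAccount string) *rbacv1.RoleBinding {
	return &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "ClusterRole",
			Name:     "typhoon-broker",
		},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Namespace: namespace,
			Name:      serviceAccount,
		}},
	}
}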
- current, err = r.client.RbacV1().RoleBindings(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create broker RoleBinding", zap.String("roleBinding", fullname.String()), zap.Error(err)) - rb.GetReconcilableBrokerStatus().MarkBrokerRoleBindingFailed(ReasonFailedRoleBindingCreate, "Failed to create broker RoleBinding") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, ReasonFailedRoleBindingCreate, - "Failed to create broker RoleBinding %s: %w", fullname, err) - } - } - - // Update status - rb.GetReconcilableBrokerStatus().MarkBrokerRoleBindingReady() - - return current, nil -} diff --git a/pkg/reconciler/redisbroker/controller.go b/pkg/reconciler/redisbroker/controller.go deleted file mode 100644 index a6178cef..00000000 --- a/pkg/reconciler/redisbroker/controller.go +++ /dev/null @@ -1,179 +0,0 @@ -package redisbroker - -import ( - "context" - - "github.com/kelseyhightower/envconfig" - "go.uber.org/zap" - - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" - - kubeclient "knative.dev/pkg/client/injection/kube/client" - "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" - "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap" - endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" - "knative.dev/pkg/client/injection/kube/informers/core/v1/secret" - "knative.dev/pkg/client/injection/kube/informers/core/v1/service" - "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount" - rolebindingsinformer "knative.dev/pkg/client/injection/kube/informers/rbac/v1/rolebinding" - cmw "knative.dev/pkg/configmap" - "knative.dev/pkg/controller" - "knative.dev/pkg/logging" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - rbinformer "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker" - trginformer "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger" - - rbreconciler "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/eventing/v1alpha1/redisbroker" - "github.com/zeiss/typhoon/pkg/reconciler/common" - "github.com/zeiss/typhoon/pkg/reconciler/resource" -) - -// envConfig will be used to extract the required environment variables using -// github.com/kelseyhightower/envconfig. If this configuration cannot be extracted, then -// NewController will panic. 
-type envConfig struct { - RedisImage string `envconfig:"REDISBROKER_REDIS_IMAGE" required:"true"` - BrokerImage string `envconfig:"REDISBROKER_BROKER_IMAGE" required:"true"` - BrokerImagePullPolicy string `envconfig:"REDISBROKER_BROKER_IMAGE_PULL_POLICY" default:"IfNotPresent"` -} - -// NewController initializes the controller and is called by the generated code -// Registers event handlers to enqueue events -func NewController( - ctx context.Context, - cmw cmw.Watcher, -) *controller.Impl { - env := &envConfig{} - if err := envconfig.Process("", env); err != nil { - logging.FromContext(ctx).Panicf("unable to process RedisBroker's required environment variables: %v", err) - } - - rbInformer := rbinformer.Get(ctx) - trgInformer := trginformer.Get(ctx) - secretInformer := secret.Get(ctx) - configMapInformer := configmap.Get(ctx) - deploymentInformer := deployment.Get(ctx) - serviceInformer := service.Get(ctx) - endpointsInformer := endpointsinformer.Get(ctx) - serviceAccountInformer := serviceaccount.Get(ctx) - roleBindingsInformer := rolebindingsinformer.Get(ctx) - - _ = rolebindingsinformer.Get(ctx) - - r := &reconciler{ - secretReconciler: common.NewSecretReconciler(ctx, secretInformer.Lister(), trgInformer.Lister()), - configMapReconciler: common.NewConfigMapReconciler(ctx, configMapInformer.Lister()), - saReconciler: common.NewServiceAccountReconciler(ctx, serviceAccountInformer.Lister(), roleBindingsInformer.Lister()), - brokerReconciler: common.NewBrokerReconciler(ctx, deploymentInformer.Lister(), serviceInformer.Lister(), endpointsInformer.Lister(), - env.BrokerImage, corev1.PullPolicy(env.BrokerImagePullPolicy)), - - redisReconciler: redisReconciler{ - client: kubeclient.Get(ctx), - deploymentLister: deploymentInformer.Lister(), - serviceLister: serviceInformer.Lister(), - endpointsLister: endpointsInformer.Lister(), - image: env.RedisImage, - }, - } - - impl := rbreconciler.NewImpl(ctx, r) - - rb := &eventingv1alpha1.RedisBroker{} - gvk := rb.GetGroupVersionKind() - - rbInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) - - secretInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(rb), - Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) - deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(rb), - Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) - serviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(rb), - Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) - endpointsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - ep, ok := obj.(*corev1.Endpoints) - if !ok || ep.Labels != nil || ep.Labels[resource.AppNameLabel] == common.AppAnnotationValue(rb) { - return false - } - - return true - }, - Handler: controller.HandleAll(func(obj interface{}) { - ep, ok := obj.(*corev1.Endpoints) - if !ok { - return - } - - svc, err := serviceInformer.Lister().Services(ep.Namespace).Get(ep.Name) - if err != nil { - // no matter the error, if we cannot retrieve the service we cannot - // read the owner and enqueue the key. 
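// The envConfig struct above is populated with kelseyhightower/envconfig. A
// small runnable illustration with invented image references; the environment
// variable names are the ones the controller requires:
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kelseyhightower/envconfig"
)

type spec struct {
	RedisImage            string `envconfig:"REDISBROKER_REDIS_IMAGE" required:"true"`
	BrokerImage           string `envconfig:"REDISBROKER_BROKER_IMAGE" required:"true"`
	BrokerImagePullPolicy string `envconfig:"REDISBROKER_BROKER_IMAGE_PULL_POLICY" default:"IfNotPresent"`
}

func main() {
	// Illustrative values only.
	os.Setenv("REDISBROKER_REDIS_IMAGE", "docker.io/library/redis:7")
	os.Setenv("REDISBROKER_BROKER_IMAGE", "ghcr.io/example/redis-broker:latest")

	var s spec
	if err := envconfig.Process("", &s); err != nil {
		log.Fatal(err) // NewController panics instead of exiting here
	}
	fmt.Println(s.BrokerImage, s.BrokerImagePullPolicy)
}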
- return - } - - impl.EnqueueControllerOf(svc) - }), - }) - serviceAccountInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(rb), - Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) - roleBindingsInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(rb), - Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) - - // Filter Triggers that reference a Redis broker. - filterTriggerForRedisBroker := func(obj interface{}) bool { - t, ok := obj.(*eventingv1alpha1.Trigger) - if !ok { - return false - } - - // TODO replace with defaulting when webhook is implemented - if !(t.Spec.Broker.Group == gvk.Group || t.Spec.Broker.Group == "") || - t.Spec.Broker.Kind != gvk.Kind { - return false - } - - // TODO replace with broker namespace when webhook defaulting is implemented - _, err := rbInformer.Lister().RedisBrokers(t.Namespace).Get(t.Spec.Broker.Name) - switch { - case err == nil: - return true - case !apierrs.IsNotFound(err): - logging.FromContext(ctx).Error("Unable to get Redis Broker", zap.Any("broker", t.Spec.Broker), zap.Error(err)) - } - - return false - } - - enqueueFromTrigger := func(obj interface{}) { - t, ok := obj.(*eventingv1alpha1.Trigger) - if !ok { - return - } - - impl.EnqueueKey(types.NamespacedName{ - Name: t.Spec.Broker.Name, - Namespace: t.Namespace, - }) - } - - trgInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: filterTriggerForRedisBroker, - Handler: controller.HandleAll(enqueueFromTrigger), - }) - - return impl -} diff --git a/pkg/reconciler/redisbroker/reconcile_redis.go b/pkg/reconciler/redisbroker/reconcile_redis.go deleted file mode 100644 index bd5dece2..00000000 --- a/pkg/reconciler/redisbroker/reconcile_redis.go +++ /dev/null @@ -1,225 +0,0 @@ -package redisbroker - -import ( - "context" - - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" - appsv1listers "k8s.io/client-go/listers/apps/v1" - corev1listers "k8s.io/client-go/listers/core/v1" - "knative.dev/eventing/pkg/apis/duck" - "knative.dev/pkg/logging" - pkgreconciler "knative.dev/pkg/reconciler" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/common" - "github.com/zeiss/typhoon/pkg/reconciler/resource" - "github.com/zeiss/typhoon/pkg/reconciler/semantic" -) - -const ( - redisResourceSuffix = "rb-redis" -) - -type redisReconciler struct { - client kubernetes.Interface - deploymentLister appsv1listers.DeploymentLister - serviceLister corev1listers.ServiceLister - endpointsLister corev1listers.EndpointsLister - image string -} - -func (r *redisReconciler) reconcile(ctx context.Context, rb *eventingv1alpha1.RedisBroker) (*appsv1.Deployment, *corev1.Service, error) { - if rb.IsUserProvidedRedis() { - // Nothing to do but mark the status for each of the elements reconciled. 
- rb.Status.MarkRedisUserProvided() - return nil, nil, nil - } - - d, err := r.reconcileDeployment(ctx, rb) - if err != nil { - return nil, nil, err - } - - svc, err := r.reconcileService(ctx, rb) - if err != nil { - return d, nil, err - } - - _, err = r.reconcileEndpoints(ctx, svc, rb) - if err != nil { - return d, nil, err - } - - return d, svc, nil -} - -func buildRedisDeployment(rb *eventingv1alpha1.RedisBroker, image string) *appsv1.Deployment { - return resource.NewDeployment(rb.Namespace, rb.Name+"-"+redisResourceSuffix, - resource.DeploymentWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, common.AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "redis-deployment"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, rb.Name+"-"+redisResourceSuffix), - resource.MetaAddOwner(rb, rb.GetGroupVersionKind())), - resource.DeploymentAddSelectorForTemplate(resource.AppComponentLabel, "redis-deployment"), - resource.DeploymentAddSelectorForTemplate(resource.AppInstanceLabel, rb.Name+"-"+redisResourceSuffix), - resource.DeploymentSetReplicas(1), - resource.DeploymentWithTemplateSpecOptions( - resource.PodTemplateSpecWithMetaOptions( - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - ), - resource.PodTemplateSpecWithPodSpecOptions( - resource.PodSpecAddContainer( - resource.NewContainer("redis", image, - resource.ContainerAddEnvFromValue("REDIS_ARGS", "--appendonly yes"), - resource.ContainerAddPort("redis", 6379)))))) -} - -func (r *redisReconciler) reconcileDeployment(ctx context.Context, rb *eventingv1alpha1.RedisBroker) (*appsv1.Deployment, error) { - desired := buildRedisDeployment(rb, r.image) - current, err := r.deploymentLister.Deployments(desired.Namespace).Get(desired.Name) - switch { - case err == nil: - // Compare current object with desired, update if needed. - if !semantic.Semantic.DeepEqual(desired, current) { - desired.Status = current.Status - desired.ResourceVersion = current.ResourceVersion - - current, err = r.client.AppsV1().Deployments(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to update the deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisDeploymentFailed(common.ReasonFailedDeploymentUpdate, "Failed to update Redis deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedDeploymentUpdate, - "Failed to get Redis deployment %s: %w", fullname, err) - } - } - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current deployment. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get the deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisDeploymentFailed(common.ReasonFailedDeploymentGet, "Failed to get Redis deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedDeploymentGet, - "Failed to get Redis deployment %s: %w", fullname, err) - - default: - // The deployment has not been found, create it. 
- current, err = r.client.AppsV1().Deployments(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create the deployment", zap.String("deployment", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisDeploymentFailed(common.ReasonFailedDeploymentCreate, "Failed to create Redis deployment") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedDeploymentCreate, - "Failed to create Redis deployment %s: %w", fullname, err) - } - } - - // Update status based on deployment - rb.Status.PropagateRedisDeploymentAvailability(ctx, ¤t.Status) - - return current, nil -} - -func buildRedisService(rb *eventingv1alpha1.RedisBroker) *corev1.Service { - return resource.NewService(rb.Namespace, rb.Name+"-"+redisResourceSuffix, - resource.ServiceWithMetaOptions( - resource.MetaAddLabel(resource.AppNameLabel, common.AppAnnotationValue(rb)), - resource.MetaAddLabel(resource.AppComponentLabel, "redis-service"), - resource.MetaAddLabel(resource.AppPartOfLabel, resource.PartOf), - resource.MetaAddLabel(resource.AppManagedByLabel, resource.ManagedBy), - resource.MetaAddLabel(resource.AppInstanceLabel, rb.Name+"-"+redisResourceSuffix), - resource.MetaAddOwner(rb, rb.GetGroupVersionKind())), - resource.ServiceSetType(corev1.ServiceTypeClusterIP), - resource.ServiceAddSelectorLabel(resource.AppComponentLabel, "redis-deployment"), - resource.ServiceAddSelectorLabel(resource.AppInstanceLabel, rb.Name+"-"+redisResourceSuffix), - resource.ServiceAddPort("redis", 6379, 6379)) -} - -func (r *redisReconciler) reconcileService(ctx context.Context, rb *eventingv1alpha1.RedisBroker) (*corev1.Service, error) { - desired := buildRedisService(rb) - current, err := r.serviceLister.Services(desired.Namespace).Get(desired.Name) - switch { - case err == nil: - // Compare current object with desired, update if needed. - if !semantic.Semantic.DeepEqual(desired, current) { - desired.Status = current.Status - desired.ResourceVersion = current.ResourceVersion - - current, err = r.client.CoreV1().Services(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to update the service", zap.String("service", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisServiceFailed(common.ReasonFailedServiceUpdate, "Failed to update Redis service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedServiceUpdate, - "Failed to get Redis service %s: %w", fullname, err) - } - } - - case !apierrs.IsNotFound(err): - // An error occurred retrieving current object. - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to get the service", zap.String("service", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisServiceFailed(common.ReasonFailedServiceGet, "Failed to get Redis service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedServiceGet, - "Failed to get Redis service %s: %w", fullname, err) - - default: - // The object has not been found, create it. 
- current, err = r.client.CoreV1().Services(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{}) - if err != nil { - fullname := types.NamespacedName{Namespace: desired.Namespace, Name: desired.Name} - logging.FromContext(ctx).Error("Unable to create the service", zap.String("service", fullname.String()), zap.Error(err)) - rb.Status.MarkRedisServiceFailed(common.ReasonFailedServiceCreate, "Failed to create Redis service") - - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedServiceCreate, - "Failed to create Redis service %s: %w", fullname, err) - } - } - - // Service exists and is up to date. - rb.Status.MarkRedisServiceReady() - - return current, nil -} - -func (r *redisReconciler) reconcileEndpoints(ctx context.Context, service *corev1.Service, rb *eventingv1alpha1.RedisBroker) (*corev1.Endpoints, error) { - ep, err := r.endpointsLister.Endpoints(service.Namespace).Get(service.Name) - switch { - case err == nil: - if duck.EndpointsAreAvailable(ep) { - rb.Status.MarkRedisEndpointsTrue() - return ep, nil - } - - rb.Status.MarkRedisEndpointsFailed(common.ReasonUnavailableEndpoints, "Endpoints for redis service are not available") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonUnavailableEndpoints, - "Endpoints for redis service are not available %s", - types.NamespacedName{Namespace: ep.Namespace, Name: ep.Name}) - - case apierrs.IsNotFound(err): - rb.Status.MarkRedisEndpointsFailed(common.ReasonUnavailableEndpoints, "Endpoints for redis service do not exist") - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonUnavailableEndpoints, - "Endpoints for redis service do not exist %s", - types.NamespacedName{Namespace: service.Namespace, Name: service.Name}) - } - - fullname := types.NamespacedName{Namespace: service.Namespace, Name: service.Name} - rb.Status.MarkRedisEndpointsUnknown(common.ReasonFailedEndpointsGet, "Could not retrieve endpoints for redis service") - logging.FromContext(ctx).Error("Unable to get the redis service endpoints", zap.String("endpoint", fullname.String()), zap.Error(err)) - return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedEndpointsGet, - "Failed to get redis service ednpoints %s: %w", fullname, err) -} diff --git a/pkg/reconciler/redisbroker/reconciler.go b/pkg/reconciler/redisbroker/reconciler.go deleted file mode 100644 index 63d811ef..00000000 --- a/pkg/reconciler/redisbroker/reconciler.go +++ /dev/null @@ -1,176 +0,0 @@ -package redisbroker - -import ( - "context" - "fmt" - "strconv" - "strings" - - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - - "knative.dev/pkg/apis" - "knative.dev/pkg/logging" - "knative.dev/pkg/network" - knreconciler "knative.dev/pkg/reconciler" - - eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1" - "github.com/zeiss/typhoon/pkg/reconciler/common" - "github.com/zeiss/typhoon/pkg/reconciler/resource" -) - -const ( - defaultMaxLen = "1000" -) - -type reconciler struct { - secretReconciler common.SecretReconciler - configMapReconciler common.ConfigMapReconciler - saReconciler common.ServiceAccountReconciler - brokerReconciler common.BrokerReconciler - - redisReconciler redisReconciler -} - -// options that set Broker environment variables specific for the RedisBroker. 
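// The redisDeploymentOption function that follows relies on the functional
// options exposed by this repository's resource package
// (ContainerAddEnvFromValue and friends). A tiny stand-alone rendition of that
// style; the option and container types here are invented for illustration:
package main

import "fmt"

type container struct {
	env map[string]string
}

type containerOption func(*container)

func addEnv(name, value string) containerOption {
	return func(c *container) { c.env[name] = value }
}

func newContainer(opts ...containerOption) *container {
	c := &container{env: map[string]string{}}
	for _, opt := range opts {
		opt(c)
	}
	return c
}

func main() {
	c := newContainer(
		addEnv("REDIS_STREAM", "default.demo"),
		addEnv("REDIS_STREAM_MAX_LEN", "1000"),
	)
	fmt.Println(c.env)
}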
diff --git a/pkg/reconciler/redisbroker/reconciler.go b/pkg/reconciler/redisbroker/reconciler.go
deleted file mode 100644
index 63d811ef..00000000
--- a/pkg/reconciler/redisbroker/reconciler.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package redisbroker
-
-import (
-    "context"
-    "fmt"
-    "strconv"
-    "strings"
-
-    "go.uber.org/zap"
-    appsv1 "k8s.io/api/apps/v1"
-    corev1 "k8s.io/api/core/v1"
-
-    "knative.dev/pkg/apis"
-    "knative.dev/pkg/logging"
-    "knative.dev/pkg/network"
-    knreconciler "knative.dev/pkg/reconciler"
-
-    eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1"
-    "github.com/zeiss/typhoon/pkg/reconciler/common"
-    "github.com/zeiss/typhoon/pkg/reconciler/resource"
-)
-
-const (
-    defaultMaxLen = "1000"
-)
-
-type reconciler struct {
-    secretReconciler    common.SecretReconciler
-    configMapReconciler common.ConfigMapReconciler
-    saReconciler        common.ServiceAccountReconciler
-    brokerReconciler    common.BrokerReconciler
-
-    redisReconciler redisReconciler
-}
-
-// options that set Broker environment variables specific for the RedisBroker.
-func redisDeploymentOption(rb *eventingv1alpha1.RedisBroker, redisSvc *corev1.Service) resource.ObjectOption {
-    return func(obj interface{}) {
-        d := obj.(*appsv1.Deployment)
-
-        // Make sure the broker container exists before modifying it.
-        if len(d.Spec.Template.Spec.Containers) == 0 {
-            // Unexpected path.
-            panic("The Broker Deployment to be reconciled has no containers in it.")
-        }
-
-        c := &d.Spec.Template.Spec.Containers[0]
-
-        var stream string
-        if rb.Spec.Redis != nil && rb.Spec.Redis.Stream != nil && *rb.Spec.Redis.Stream != "" {
-            stream = *rb.Spec.Redis.Stream
-        } else {
-            stream = rb.Namespace + "." + rb.Name
-        }
-        resource.ContainerAddEnvFromValue("REDIS_STREAM", stream)(c)
-
-        maxLen := defaultMaxLen
-        if rb.Spec.Redis != nil && rb.Spec.Redis.StreamMaxLen != nil {
-            maxLen = strconv.Itoa(*rb.Spec.Redis.StreamMaxLen)
-        }
-        resource.ContainerAddEnvFromValue("REDIS_STREAM_MAX_LEN", maxLen)(c)
-
-        if rb.Spec.Redis != nil && rb.Spec.Redis.EnableTrackingID != nil && *rb.Spec.Redis.EnableTrackingID {
-            resource.ContainerAddEnvFromValue("REDIS_TRACKING_ID_ENABLED", "true")(c)
-        }
-
-        if rb.IsUserProvidedRedis() {
-
-            // Standalone connections require an address, while cluster connections require an
-            // address list of each endpoint available for the initial connection.
-            if rb.Spec.Redis.Connection.ClusterURLs != nil &&
-                len(rb.Spec.Redis.Connection.ClusterURLs) != 0 {
-                resource.ContainerAddEnvFromValue("REDIS_CLUSTER_ADDRESSES",
-                    strings.Join(rb.Spec.Redis.Connection.ClusterURLs, ","))(c)
-            } else {
-                resource.ContainerAddEnvFromValue("REDIS_ADDRESS", *rb.Spec.Redis.Connection.URL)(c)
-            }
-
-            if rb.Spec.Redis.Connection.Username != nil {
-                resource.ContainerAddEnvVarFromSecret("REDIS_USERNAME",
-                    rb.Spec.Redis.Connection.Username.SecretKeyRef.Name,
-                    rb.Spec.Redis.Connection.Username.SecretKeyRef.Key)(c)
-            }
-
-            if rb.Spec.Redis.Connection.Password != nil {
-                resource.ContainerAddEnvVarFromSecret("REDIS_PASSWORD",
-                    rb.Spec.Redis.Connection.Password.SecretKeyRef.Name,
-                    rb.Spec.Redis.Connection.Password.SecretKeyRef.Key)(c)
-            }
-
-            if rb.Spec.Redis.Connection.TLSCACertificate != nil {
-                resource.ContainerAddEnvVarFromSecret("REDIS_TLS_CA_CERTIFICATE",
-                    rb.Spec.Redis.Connection.TLSCACertificate.SecretKeyRef.Name,
-                    rb.Spec.Redis.Connection.TLSCACertificate.SecretKeyRef.Key)(c)
-            }
-
-            if rb.Spec.Redis.Connection.TLSCertificate != nil {
-                resource.ContainerAddEnvVarFromSecret("REDIS_TLS_CERTIFICATE",
-                    rb.Spec.Redis.Connection.TLSCertificate.SecretKeyRef.Name,
-                    rb.Spec.Redis.Connection.TLSCertificate.SecretKeyRef.Key)(c)
-            }
-
-            if rb.Spec.Redis.Connection.TLSKey != nil {
-                resource.ContainerAddEnvVarFromSecret("REDIS_TLS_KEY",
-                    rb.Spec.Redis.Connection.TLSKey.SecretKeyRef.Name,
-                    rb.Spec.Redis.Connection.TLSKey.SecretKeyRef.Key)(c)
-            }
-
-            if rb.Spec.Redis.Connection.TLSEnabled != nil && *rb.Spec.Redis.Connection.TLSEnabled {
-                resource.ContainerAddEnvFromValue("REDIS_TLS_ENABLED", "true")(c)
-            }
-
-            if rb.Spec.Redis.Connection.TLSSkipVerify != nil && *rb.Spec.Redis.Connection.TLSSkipVerify {
-                tlsSkipVerifyDefault := "true"
-                // TODO this should be moved to webhook
-                if rb.Spec.Redis.Connection.TLSCACertificate != nil {
-                    tlsSkipVerifyDefault = "false"
-                }
-                resource.ContainerAddEnvFromValue("REDIS_TLS_SKIP_VERIFY", tlsSkipVerifyDefault)(c)
-            }
-
-        } else {
-            resource.ContainerAddEnvFromValue("REDIS_ADDRESS",
                fmt.Sprintf("%s:%d", redisSvc.Name, redisSvc.Spec.Ports[0].Port))(c)
-        }
-    }
-}
-
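// The option above composes small functional mutators from the resource
// package (ContainerAddEnvFromValue, ContainerAddEnvVarFromSecret, ...).
// A minimal, self-contained sketch of that pattern follows; the names
// containerOption, addEnvFromValue, addEnvFromSecret and applyOptions are
// illustrative stand-ins, not the project's actual API.
package example

import (
    corev1 "k8s.io/api/core/v1"
)

// containerOption mutates a container in place.
type containerOption func(*corev1.Container)

// addEnvFromValue appends a literal environment variable.
func addEnvFromValue(name, value string) containerOption {
    return func(c *corev1.Container) {
        c.Env = append(c.Env, corev1.EnvVar{Name: name, Value: value})
    }
}

// addEnvFromSecret sources an environment variable from a Secret key.
func addEnvFromSecret(name, secretName, secretKey string) containerOption {
    return func(c *corev1.Container) {
        c.Env = append(c.Env, corev1.EnvVar{
            Name: name,
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
                    Key:                  secretKey,
                },
            },
        })
    }
}

// applyOptions runs every option against a container, mirroring how
// redisDeploymentOption mutates the first container of the broker Deployment.
func applyOptions(c *corev1.Container, opts ...containerOption) {
    for _, opt := range opts {
        opt(c)
    }
}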
-func (r *reconciler) ReconcileKind(ctx context.Context, rb *eventingv1alpha1.RedisBroker) knreconciler.Event {
-    logging.FromContext(ctx).Infow("Reconciling", zap.Any("RedisBroker", *rb))
-
-    // Make sure the Redis deployment and service exist.
-    _, redisSvc, err := r.redisReconciler.reconcile(ctx, rb)
-    if err != nil {
-        return err
-    }
-
-    // Iterate triggers and make sure the secret contains them.
-    secret, err := r.secretReconciler.Reconcile(ctx, rb)
-    if err != nil {
-        return err
-    }
-
-    // Make sure the ConfigMap exists.
-    configMap, err := r.configMapReconciler.Reconcile(ctx, rb)
-    if err != nil {
-        return err
-    }
-
-    // Make sure the Broker service account and roles exist.
-    sa, _, err := r.saReconciler.Reconcile(ctx, rb)
-    if err != nil {
-        return err
-    }
-
-    // Make sure the Broker deployment exists and that it points to the Redis service.
-    _, brokerSvc, err := r.brokerReconciler.Reconcile(ctx, rb, sa, secret, configMap, redisDeploymentOption(rb, redisSvc))
-    if err != nil {
-        return err
-    }
-
-    // Set the address to the Broker service.
-    rb.Status.SetAddress(getServiceAddress(brokerSvc))
-
-    return nil
-}
-
-func getServiceAddress(svc *corev1.Service) *apis.URL {
-    var port string
-    if svc.Spec.Ports[0].Port != 80 {
-        port = ":" + strconv.Itoa(int(svc.Spec.Ports[0].Port))
-    }
-
-    return apis.HTTP(
-        network.GetServiceHostname(svc.Name, svc.Namespace) + port)
-}
diff --git a/pkg/reconciler/trigger/controller.go b/pkg/reconciler/trigger/controller.go
deleted file mode 100644
index a9d903e5..00000000
--- a/pkg/reconciler/trigger/controller.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package trigger
-
-import (
-    "context"
-
-    "go.uber.org/zap"
-
-    corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/client-go/tools/cache"
-    "knative.dev/pkg/configmap"
-    "knative.dev/pkg/controller"
-    "knative.dev/pkg/kmeta"
-    "knative.dev/pkg/logging"
-    "knative.dev/pkg/resolver"
-
-    cfgInformer "knative.dev/pkg/client/injection/kube/informers/core/v1/configmap"
-
-    "github.com/zeiss/typhoon/pkg/apis/eventing"
-    eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1"
-    rbinformer "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/redisbroker"
-    tginformer "github.com/zeiss/typhoon/pkg/client/generated/injection/informers/eventing/v1alpha1/trigger"
-    tgreconciler "github.com/zeiss/typhoon/pkg/client/generated/injection/reconciler/eventing/v1alpha1/trigger"
-)
-
-// NewController initializes the controller and is called by the generated code.
-// It registers event handlers to enqueue events.
-func NewController(
-    ctx context.Context,
-    cmw configmap.Watcher,
-) *controller.Impl {
-    tgInformer := tginformer.Get(ctx)
-    rbInformer := rbinformer.Get(ctx)
-    cmInformer := cfgInformer.Get(ctx)
-
-    r := &Reconciler{
-        rbLister: rbInformer.Lister(),
-        cmLister: cmInformer.Lister(),
-    }
-
-    impl := tgreconciler.NewImpl(ctx, r)
-
-    r.uriResolver = resolver.NewURIResolverFromTracker(ctx, impl.Tracker)
-
-    tgInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
-
-    // Filter brokers that are referenced by triggers.
-    filterBroker := func(obj interface{}) bool {
-        // TODO duck
-        var accessor kmeta.OwnerRefableAccessor
-        rb := obj.(*eventingv1alpha1.RedisBroker)
-        accessor = kmeta.OwnerRefableAccessor(rb)
-
-        tgl, err := tgInformer.Lister().Triggers(accessor.GetNamespace()).List(labels.Everything())
-        if err != nil {
-            logging.FromContext(ctx).Error("Unable to list Triggers", zap.Error(err))
-            return false
-        }
-
-        for _, tg := range tgl {
-            if tg.OwnerRefableMatchesBroker(accessor) {
-                return true
-            }
-        }
-
-        return false
-    }
-
-    enqueueFromBroker := func(obj interface{}) {
-        // TODO duck
-        var accessor kmeta.OwnerRefableAccessor
-        rb := obj.(*eventingv1alpha1.RedisBroker)
-        accessor = kmeta.OwnerRefableAccessor(rb)
-
-        tgl, err := tgInformer.Lister().Triggers(accessor.GetNamespace()).List(labels.Everything())
-        if err != nil {
-            logging.FromContext(ctx).Error("Unable to list Triggers", zap.Error(err))
-            return
-        }
-
-        for _, tg := range tgl {
-            if tg.OwnerRefableMatchesBroker(accessor) {
-                impl.EnqueueKey(types.NamespacedName{
-                    Name:      tg.Name,
-                    Namespace: tg.Namespace,
-                })
-            }
-        }
-    }
-
-    filterConfigMapBroker := func(obj interface{}) bool {
-        cm, ok := obj.(*corev1.ConfigMap)
-        if !ok {
-            return false
-        }
-
-        // Get the list of owner references and filter for those that
-        // are owned by a Broker.
-        obs := eventing.GetOwnerBrokers(cm)
-        if len(obs) == 0 {
-            return false
-        }
-
-        // Iterate all triggers in the namespace and select those that are applied
-        // to the ConfigMap broker(s).
-        tgs, err := tgInformer.Lister().Triggers(cm.Namespace).List(labels.Everything())
-        if err != nil {
-            logging.FromContext(ctx).Error("Unable to list Triggers", zap.Error(err))
-            return false
-        }
-
-        // Finding one will make the filter pass.
-        for i := range tgs {
-            for j := range obs {
-                if tgs[i].OwnerReferenceMatchesBroker(obs[j]) {
-                    return true
-                }
-            }
-        }
-
-        // No triggers matching the brokers were found; do not enqueue.
-        return false
-    }
-
-    enqueueFromConfigMapBroker := func(obj interface{}) {
-        cm, ok := obj.(*corev1.ConfigMap)
-        if !ok {
-            return
-        }
-
-        // Get the list of owner references and filter for those that
-        // are owned by a Broker.
-        obs := eventing.GetOwnerBrokers(cm)
-        if len(obs) == 0 {
-            return
-        }
-
-        // Iterate all triggers in the namespace and select those that are applied
-        // to the ConfigMap broker(s).
-        tgs, err := tgInformer.Lister().Triggers(cm.Namespace).List(labels.Everything())
-        if err != nil {
-            logging.FromContext(ctx).Error("Unable to list Triggers", zap.Error(err))
-            return
-        }
-
-        // Enqueue every trigger that references one of the owning brokers.
-        for i := range tgs {
-            for j := range obs {
-                if tgs[i].OwnerReferenceMatchesBroker(obs[j]) {
-                    impl.EnqueueKey(types.NamespacedName{
-                        Name:      tgs[i].Name,
-                        Namespace: tgs[i].Namespace,
-                    })
-                    break
-                }
-            }
-        }
-    }
-
-    rbInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
-        FilterFunc: filterBroker,
-        Handler:    controller.HandleAll(enqueueFromBroker),
-    })
-
-    cmInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
-        FilterFunc: filterConfigMapBroker,
-        Handler:    controller.HandleAll(enqueueFromConfigMapBroker),
-    })
-
-    return impl
-}
diff --git a/pkg/reconciler/trigger/reconciler.go b/pkg/reconciler/trigger/reconciler.go
deleted file mode 100644
index ec727d8b..00000000
--- a/pkg/reconciler/trigger/reconciler.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package trigger
-
-import (
-    "context"
-    "encoding/json"
-    "errors"
-    "fmt"
-
-    "go.uber.org/zap"
-    corev1 "k8s.io/api/core/v1"
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
-    corev1listers "k8s.io/client-go/listers/core/v1"
-    "knative.dev/pkg/controller"
-    "knative.dev/pkg/logging"
-    pkgreconciler "knative.dev/pkg/reconciler"
-    "knative.dev/pkg/resolver"
-
-    "github.com/zeiss/typhoon/pkg/brokers/status"
-
-    eventingv1alpha1 "github.com/zeiss/typhoon/pkg/apis/eventing/v1alpha1"
-    eventingv1alpha1listers "github.com/zeiss/typhoon/pkg/client/generated/listers/eventing/v1alpha1"
-    "github.com/zeiss/typhoon/pkg/reconciler/common"
-)
-
-type Reconciler struct {
-    // TODO duck brokers
-    rbLister eventingv1alpha1listers.RedisBrokerLister
-    cmLister corev1listers.ConfigMapLister
-
-    uriResolver *resolver.URIResolver
-}
-
-func (r *Reconciler) ReconcileKind(ctx context.Context, t *eventingv1alpha1.Trigger) pkgreconciler.Event {
-    b, err := r.resolveBroker(ctx, t)
-    if err != nil {
-        return err
-    }
-
-    if err := r.resolveTarget(ctx, t); err != nil {
-        return err
-    }
-
-    if err := r.resolveDLS(ctx, t); err != nil {
-        return err
-    }
-
-    return r.reconcileStatusConfigMap(ctx, t, b)
-}
-
-func (r *Reconciler) resolveBroker(ctx context.Context, t *eventingv1alpha1.Trigger) (eventingv1alpha1.ReconcilableBroker, pkgreconciler.Event) {
-    // TODO duck
-    // TODO move to webhook
-    switch {
-    case t.Spec.Broker.Group == "":
-        t.Spec.Broker.Group = eventingv1alpha1.SchemeGroupVersion.Group
-    case t.Spec.Broker.Group != eventingv1alpha1.SchemeGroupVersion.Group:
-        return nil, controller.NewPermanentError(fmt.Errorf("not supported Broker Group %q", t.Spec.Broker.Group))
-    }
-
-    var rb *eventingv1alpha1.RedisBroker
-    if t.Spec.Broker.Kind == rb.GetGroupVersionKind().Kind {
-        return r.resolveRedisBroker(ctx, t)
-    }
-
-    return nil, controller.NewPermanentError(fmt.Errorf("not supported Broker Kind %q", t.Spec.Broker.Kind))
-}
-
-func (r *Reconciler) resolveRedisBroker(ctx context.Context, t *eventingv1alpha1.Trigger) (eventingv1alpha1.ReconcilableBroker, pkgreconciler.Event) {
-    rb, err := r.rbLister.RedisBrokers(t.Namespace).Get(t.Spec.Broker.Name)
-    if err != nil {
-        if apierrs.IsNotFound(err) {
-            logging.FromContext(ctx).Errorw(fmt.Sprintf("Trigger %s/%s references a non-existing broker %q", t.Namespace, t.Name, t.Spec.Broker.Name))
-            t.Status.MarkBrokerFailed(common.ReasonBrokerDoesNotExist, "Broker %q does not exist", t.Spec.Broker.Name)
-            // No need to requeue, we will be notified if the broker is created.
-            return nil, controller.NewPermanentError(err)
-        }
-
-        t.Status.MarkBrokerFailed(common.ReasonFailedBrokerGet, "Failed to get broker %q: %s", t.Spec.Broker, err)
-        return nil, pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedBrokerGet,
-            "Failed to get broker for trigger %s/%s: %w", t.Namespace, t.Name, err)
-    }
-
-    t.Status.PropagateBrokerCondition(rb.Status.GetTopLevelCondition())
-
-    // No need to requeue, we'll get requeued when the broker changes status.
-    if !rb.IsReady() {
-        logging.FromContext(ctx).Errorw(fmt.Sprintf("Trigger %s/%s references a non-ready broker %q", t.Namespace, t.Name, t.Spec.Broker.Name))
-    }
-
-    return rb, nil
-}
-
-func (r *Reconciler) resolveTarget(ctx context.Context, t *eventingv1alpha1.Trigger) pkgreconciler.Event {
-    if t.Spec.Target.Ref != nil && t.Spec.Target.Ref.Namespace == "" {
-        // To call URIFromDestinationV1(ctx context.Context, dest v1.Destination, parent interface{}), dest.Ref must have a Namespace.
-        // If Target.Ref.Namespace is empty, the Trigger's Namespace is used as the Namespace of dest.Ref.
-        t.Spec.Target.Ref.Namespace = t.Namespace
-    }
-
-    targetURI, err := r.uriResolver.URIFromDestinationV1(ctx, t.Spec.Target, t)
-    if err != nil {
-        logging.FromContext(ctx).Errorw("Unable to get the target's URI", zap.Error(err))
-        t.Status.MarkTargetResolvedFailed("Unable to get the target's URI", "%v", err)
-        t.Status.TargetURI = nil
-        return pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedResolveReference,
-            "Failed to get target's URI: %w", err)
-    }
-
-    t.Status.TargetURI = targetURI
-    t.Status.MarkTargetResolvedSucceeded()
-
-    return nil
-}
-
-func (r *Reconciler) resolveDLS(ctx context.Context, t *eventingv1alpha1.Trigger) pkgreconciler.Event {
-    if t.Spec.Delivery == nil || t.Spec.Delivery.DeadLetterSink == nil {
-        t.Status.DeadLetterSinkURI = nil
-        t.Status.MarkDeadLetterSinkNotConfigured()
-        return nil
-    }
-
-    if t.Spec.Delivery.DeadLetterSink.Ref != nil && t.Spec.Delivery.DeadLetterSink.Ref.Namespace == "" {
-        // To call URIFromDestinationV1(ctx context.Context, dest v1.Destination, parent interface{}), dest.Ref must have a Namespace.
-        // If DeadLetterSink.Ref.Namespace is empty, the Trigger's Namespace is used as the Namespace of dest.Ref.
-        t.Spec.Delivery.DeadLetterSink.Ref.Namespace = t.Namespace
-    }
-
-    dlsURI, err := r.uriResolver.URIFromDestinationV1(ctx, *t.Spec.Delivery.DeadLetterSink, t)
-    if err != nil {
-        logging.FromContext(ctx).Errorw("Unable to get the dead letter sink's URI", zap.Error(err))
-        t.Status.MarkDeadLetterSinkResolvedFailed("Unable to get the dead letter sink's URI", "%v", err)
-        t.Status.DeadLetterSinkURI = nil
-        return pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonFailedResolveReference,
-            "Failed to get dead letter sink's URI: %w", err)
-    }
-
-    t.Status.DeadLetterSinkURI = dlsURI
-    t.Status.MarkDeadLetterSinkResolvedSucceeded()
-
-    return nil
-}
-
-func (r *Reconciler) reconcileStatusConfigMap(ctx context.Context, t *eventingv1alpha1.Trigger, b eventingv1alpha1.ReconcilableBroker) pkgreconciler.Event {
-    configMapName := common.GetBrokerConfigMapName(b)
-
-    cm, err := r.cmLister.ConfigMaps(t.Namespace).Get(configMapName)
-    if err != nil {
-        if apierrs.IsNotFound(err) {
-            logging.FromContext(ctx).Errorf("Trigger %s/%s could not find the Status ConfigMap for the referenced broker %q", t.Namespace, t.Name, configMapName)
-            t.Status.MarkStatusConfigMapFailed(common.ReasonStatusConfigMapDoesNotExist, "Status ConfigMap %q does not exist", configMapName)
-            // No need to requeue, we will be notified when the status ConfigMap is created.
-            return controller.NewPermanentError(err)
-        }
-
-        t.Status.MarkStatusConfigMapFailed(common.ReasonStatusConfigMapGetFailed, "Failed to get ConfigMap for broker %q: %s", configMapName, err)
-        return pkgreconciler.NewEvent(corev1.EventTypeWarning, common.ReasonStatusConfigMapGetFailed,
-            "Failed to get ConfigMap for broker %s: %w", configMapName, err)
-    }
-
-    cmst, ok := cm.Data[common.ConfigMapStatusKey]
-    if !ok {
-        errmsg := fmt.Sprintf("ConfigMap %q does not contain key %q", configMapName, common.ConfigMapStatusKey)
-        t.Status.MarkStatusConfigMapFailed(common.ReasonStatusConfigMapReadFailed, errmsg)
-        // No need to requeue, we will be notified when the status ConfigMap is updated.
-        return controller.NewPermanentError(errors.New(errmsg))
-    }
-
-    sts := map[string]status.Status{}
-    if err := json.Unmarshal([]byte(cmst), &sts); err != nil {
-        errmsg := fmt.Sprintf("ConfigMap %s/%s could not be unmarshalled as a status: %v", configMapName, common.ConfigMapStatusKey, err)
-        t.Status.MarkStatusConfigMapFailed(common.ReasonStatusConfigMapReadFailed, errmsg)
-        // No need to requeue, we will be notified when the status ConfigMap is updated.
-        return controller.NewPermanentError(errors.New(errmsg))
-    }
-
-    return r.summarizeStatus(t, sts)
-}
-
-func (r *Reconciler) summarizeStatus(t *eventingv1alpha1.Trigger, sts map[string]status.Status) pkgreconciler.Event {
-    // Iterate all nodes and take note of the status for this trigger.
-    var temp status.SubscriptionStatusChoice
-    for instance, st := range sts {
-        subs, ok := st.Subscriptions[t.Name]
-        if !ok {
-            continue
-        }
-
-        switch subs.Status {
-        case status.SubscriptionStatusFailed:
-            // If one instance reports failure, consider the trigger failed.
-            errmsg := fmt.Sprintf("subscription failure reported by %s", instance)
-            t.Status.MarkStatusConfigMapFailed(common.ReasonStatusSubscriptionFailed, errmsg)
-            return controller.NewPermanentError(errors.New(errmsg))
-
-        case status.SubscriptionStatusComplete:
-            // If one instance reports complete, consider the trigger completed.
-            // Note: this is eventually consistent, some nodes might still be sending events!
-            t.Status.MarkStatusConfigMapSucceeded(common.ReasonStatusSubscriptionCompleted, fmt.Sprintf("subscription completion reported by %s", instance))
-            return nil
-
-        case status.SubscriptionStatusReady:
-            // Running state takes precedence over ready state.
-            if temp != status.SubscriptionStatusRunning {
-                temp = status.SubscriptionStatusReady
-            }
-
-        case status.SubscriptionStatusRunning:
-            if temp != status.SubscriptionStatusRunning {
-                temp = status.SubscriptionStatusRunning
-            }
-        }
-    }
-
-    switch temp {
-    case status.SubscriptionStatusReady:
-        t.Status.MarkStatusConfigMapSucceeded(common.ReasonStatusSubscriptionReady, "subscription ready to dispatch events")
-
-    case status.SubscriptionStatusRunning:
-        t.Status.MarkStatusConfigMapSucceeded(common.ReasonStatusSubscriptionRunning, "subscription running")
-
-    default:
-        t.Status.MarkStatusConfigMapSucceeded(common.ReasonStatusSubscriptionUnknown, "no subscription status information")
-    }
-
-    return nil
-}
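To make the aggregation in summarizeStatus easier to follow, here is a minimal stand-alone sketch of the same precedence rules over the per-instance states reported for one trigger: any failure wins, then completion, then running, then ready, otherwise unknown. The type and constant names below are illustrative stand-ins for the project's status package, not its actual API.

package example

// subscriptionState is an illustrative stand-in for status.SubscriptionStatusChoice.
type subscriptionState string

const (
    stateUnknown  subscriptionState = ""
    stateReady    subscriptionState = "Ready"
    stateRunning  subscriptionState = "Running"
    stateComplete subscriptionState = "Complete"
    stateFailed   subscriptionState = "Failed"
)

// summarize folds the states reported by each broker instance for a single
// trigger into one aggregate, mirroring the switch in summarizeStatus.
func summarize(perInstance map[string]subscriptionState) subscriptionState {
    agg := stateUnknown
    for _, s := range perInstance {
        switch s {
        case stateFailed:
            // One failing instance marks the whole trigger as failed.
            return stateFailed
        case stateComplete:
            // Completion also short-circuits; other instances may still drain events.
            return stateComplete
        case stateRunning:
            // Running takes precedence over ready.
            agg = stateRunning
        case stateReady:
            if agg != stateRunning {
                agg = stateReady
            }
        }
    }
    return agg
}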