diff --git a/changelogs/unreleased/6016-relu-small.md b/changelogs/unreleased/6016-relu-small.md
new file mode 100644
index 00000000000..54f7a59fc3c
--- /dev/null
+++ b/changelogs/unreleased/6016-relu-small.md
@@ -0,0 +1 @@
+Envoy: Adds support for setting [per-host circuit breaker max-connections threshold](https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-per-host-thresholds) using a new service-level annotation: `projectcontour.io/per-host-max-connections`.
diff --git a/internal/annotation/annotations.go b/internal/annotation/annotations.go
index 4312a7df110..5c39058a461 100644
--- a/internal/annotation/annotations.go
+++ b/internal/annotation/annotations.go
@@ -61,13 +61,14 @@ var annotationsByKind = map[string]map[string]struct{}{
 		"projectcontour.io/websocket-routes":     {},
 	},
 	"Service": {
-		"projectcontour.io/max-connections":       {},
-		"projectcontour.io/max-pending-requests":  {},
-		"projectcontour.io/max-requests":          {},
-		"projectcontour.io/max-retries":           {},
-		"projectcontour.io/upstream-protocol.h2":  {},
-		"projectcontour.io/upstream-protocol.h2c": {},
-		"projectcontour.io/upstream-protocol.tls": {},
+		"projectcontour.io/max-connections":          {},
+		"projectcontour.io/max-pending-requests":     {},
+		"projectcontour.io/max-requests":             {},
+		"projectcontour.io/max-retries":              {},
+		"projectcontour.io/per-host-max-connections": {},
+		"projectcontour.io/upstream-protocol.h2":     {},
+		"projectcontour.io/upstream-protocol.h2c":    {},
+		"projectcontour.io/upstream-protocol.tls":    {},
 	},
 	"HTTPProxy": {
 		"kubernetes.io/ingress.class": {},
@@ -256,3 +257,12 @@ func MaxRequests(o metav1.Object) uint32 {
 func MaxRetries(o metav1.Object) uint32 {
 	return parseUInt32(ContourAnnotation(o, "max-retries"))
 }
+
+// PerHostMaxConnections returns the value of the first matching
+// per-host-max-connections annotation for the following annotations:
+// 1. projectcontour.io/per-host-max-connections
+//
+// '0' is returned if the annotation is absent or unparsable.
+func PerHostMaxConnections(o metav1.Object) uint32 {
+	return parseUInt32(ContourAnnotation(o, "per-host-max-connections"))
+}
diff --git a/internal/dag/accessors.go b/internal/dag/accessors.go
index 85460ae3149..3aff82c8c7a 100644
--- a/internal/dag/accessors.go
+++ b/internal/dag/accessors.go
@@ -62,12 +62,13 @@ func (d *DAG) EnsureService(meta types.NamespacedName, port int, healthPort int,
 			HealthPort:       healthSvcPort,
 			Weight:           1,
 		},
-		Protocol:           upstreamProtocol(svc, svcPort),
-		MaxConnections:     annotation.MaxConnections(svc),
-		MaxPendingRequests: annotation.MaxPendingRequests(svc),
-		MaxRequests:        annotation.MaxRequests(svc),
-		MaxRetries:         annotation.MaxRetries(svc),
-		ExternalName:       externalName(svc),
+		Protocol:              upstreamProtocol(svc, svcPort),
+		MaxConnections:        annotation.MaxConnections(svc),
+		MaxPendingRequests:    annotation.MaxPendingRequests(svc),
+		MaxRequests:           annotation.MaxRequests(svc),
+		MaxRetries:            annotation.MaxRetries(svc),
+		PerHostMaxConnections: annotation.PerHostMaxConnections(svc),
+		ExternalName:          externalName(svc),
 	}, nil
 }
diff --git a/internal/dag/builder_test.go b/internal/dag/builder_test.go
index fb4b435059c..46a046b3030 100644
--- a/internal/dag/builder_test.go
+++ b/internal/dag/builder_test.go
@@ -7159,10 +7159,11 @@ func TestDAGInsert(t *testing.T) {
 			Name:      "kuard",
 			Namespace: "default",
 			Annotations: map[string]string{
-				"projectcontour.io/max-connections":      "9000",
-				"projectcontour.io/max-pending-requests": "4096",
-				"projectcontour.io/max-requests":         "404",
-				"projectcontour.io/max-retries":          "7",
+				"projectcontour.io/max-connections":          "9000",
+				"projectcontour.io/max-pending-requests":     "4096",
+				"projectcontour.io/max-requests":             "404",
+				"projectcontour.io/max-retries":              "7",
+				"projectcontour.io/per-host-max-connections": "45",
 			},
 		},
 		Spec: v1.ServiceSpec{
@@ -10895,10 +10896,11 @@ func TestDAGInsert(t *testing.T) {
 					ServicePort:      s1b.Spec.Ports[0],
 					HealthPort:       s1b.Spec.Ports[0],
 				},
-				MaxConnections:     9000,
-				MaxPendingRequests: 4096,
-				MaxRequests:        404,
-				MaxRetries:         7,
+				MaxConnections:        9000,
+				MaxPendingRequests:    4096,
+				MaxRequests:           404,
+				MaxRetries:            7,
+				PerHostMaxConnections: 45,
 			}),
 		),
 	),
diff --git a/internal/dag/dag.go b/internal/dag/dag.go
index aea888e4b16..2a7c61b8ec7 100644
--- a/internal/dag/dag.go
+++ b/internal/dag/dag.go
@@ -966,6 +966,10 @@ type Service struct {
 	// Envoy will allow to the upstream cluster.
 	MaxRetries uint32
 
+	// PerHostMaxConnections is the maximum number of connections
+	// that Envoy will allow to each individual host in a cluster.
+	PerHostMaxConnections uint32
+
 	// ExternalName is an optional field referencing a dns entry for Service type "ExternalName"
 	ExternalName string
 }
diff --git a/internal/envoy/v3/cluster.go b/internal/envoy/v3/cluster.go
index e39beedadee..5709449e99b 100644
--- a/internal/envoy/v3/cluster.go
+++ b/internal/envoy/v3/cluster.go
@@ -78,7 +78,7 @@ func Cluster(c *dag.Cluster) *envoy_cluster_v3.Cluster {
 		cluster.IgnoreHealthOnHostRemoval = true
 	}
 
-	if envoy.AnyPositive(service.MaxConnections, service.MaxPendingRequests, service.MaxRequests, service.MaxRetries) {
+	if envoy.AnyPositive(service.MaxConnections, service.MaxPendingRequests, service.MaxRequests, service.MaxRetries, service.PerHostMaxConnections) {
 		cluster.CircuitBreakers = &envoy_cluster_v3.CircuitBreakers{
 			Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 				MaxConnections:     protobuf.UInt32OrNil(service.MaxConnections),
@@ -86,6 +86,9 @@
 				MaxRequests:        protobuf.UInt32OrNil(service.MaxRequests),
 				MaxRetries:         protobuf.UInt32OrNil(service.MaxRetries),
 			}},
+			PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
+				MaxConnections: protobuf.UInt32OrNil(service.PerHostMaxConnections),
+			}},
 		}
 	}
diff --git a/internal/envoy/v3/cluster_test.go b/internal/envoy/v3/cluster_test.go
index 42ebd5b108e..8f8a70be12e 100644
--- a/internal/envoy/v3/cluster_test.go
+++ b/internal/envoy/v3/cluster_test.go
@@ -347,6 +347,7 @@ func TestCluster(t *testing.T) {
 				Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 					MaxConnections: wrapperspb.UInt32(9000),
 				}},
+				PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
 			},
 		},
 	},
@@ -375,6 +376,7 @@ func TestCluster(t *testing.T) {
 				Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 					MaxPendingRequests: wrapperspb.UInt32(4096),
 				}},
+				PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
 			},
 		},
 	},
@@ -403,6 +405,7 @@ func TestCluster(t *testing.T) {
 				Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 					MaxRequests: wrapperspb.UInt32(404),
 				}},
+				PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
 			},
 		},
 	},
@@ -431,6 +434,36 @@ func TestCluster(t *testing.T) {
 				Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 					MaxRetries: wrapperspb.UInt32(7),
 				}},
+				PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
+			},
+		},
+	},
+	"projectcontour.io/per-host-max-connections": {
+		cluster: &dag.Cluster{
+			Upstream: &dag.Service{
+				PerHostMaxConnections: 45,
+				Weighted: dag.WeightedService{
+					Weight:           1,
+					ServiceName:      s1.Name,
+					ServiceNamespace: s1.Namespace,
+					ServicePort:      s1.Spec.Ports[0],
+					HealthPort:       s1.Spec.Ports[0],
+				},
+			},
+		},
+		want: &envoy_cluster_v3.Cluster{
+			Name:                 "default/kuard/443/da39a3ee5e",
+			AltStatName:          "default_kuard_443",
+			ClusterDiscoveryType: ClusterDiscoveryType(envoy_cluster_v3.Cluster_EDS),
+			EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
+				EdsConfig:   ConfigSource("contour"),
+				ServiceName: "default/kuard/http",
+			},
+			CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
+				Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
+				PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
+					MaxConnections: wrapperspb.UInt32(45),
+				}},
 			},
 		},
 	},
diff --git a/internal/featuretests/v3/cluster_test.go b/internal/featuretests/v3/cluster_test.go
index 0c9c3dd0ae5..8d09336e4c4 100644
--- a/internal/featuretests/v3/cluster_test.go
+++ b/internal/featuretests/v3/cluster_test.go
@@ -392,6 +392,7 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
 		Annotate("projectcontour.io/max-pending-requests", "4096").
 		Annotate("projectcontour.io/max-requests", "404").
 		Annotate("projectcontour.io/max-retries", "7").
+		Annotate("projectcontour.io/per-host-max-connections", "45").
 		WithPorts(v1.ServicePort{Port: 8080, TargetPort: intstr.FromString("8080")})
 
 	i1 := &networking_v1.Ingress{
@@ -425,6 +426,9 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
 				MaxRequests:        wrapperspb.UInt32(404),
 				MaxRetries:         wrapperspb.UInt32(7),
 			}},
+			PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
+				MaxConnections: wrapperspb.UInt32(45),
+			}},
 		},
 	}),
 ),
@@ -455,6 +459,7 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
 			Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
 				MaxPendingRequests: wrapperspb.UInt32(9999),
 			}},
+			PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
 		},
 	}),
 ),
diff --git a/internal/xdscache/v3/cluster_test.go b/internal/xdscache/v3/cluster_test.go
index 6cf891fe73f..8fd6f7f7d5e 100644
--- a/internal/xdscache/v3/cluster_test.go
+++ b/internal/xdscache/v3/cluster_test.go
@@ -743,10 +743,11 @@ func TestClusterVisit(t *testing.T) {
 			"default",
 			"kuard",
 			map[string]string{
-				"projectcontour.io/max-connections":      "9000",
-				"projectcontour.io/max-pending-requests": "4096",
-				"projectcontour.io/max-requests":         "404",
-				"projectcontour.io/max-retries":          "7",
+				"projectcontour.io/max-connections":          "9000",
+				"projectcontour.io/max-pending-requests":     "4096",
+				"projectcontour.io/max-requests":             "404",
+				"projectcontour.io/max-retries":              "7",
+				"projectcontour.io/per-host-max-connections": "45",
 			},
 			v1.ServicePort{
 				Protocol:   "TCP",
@@ -771,6 +772,9 @@ func TestClusterVisit(t *testing.T) {
 				MaxRequests:        wrapperspb.UInt32(404),
 				MaxRetries:         wrapperspb.UInt32(7),
 			}},
+			PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
+				MaxConnections: wrapperspb.UInt32(45),
+			}},
 		},
 	},
 ),
diff --git a/site/content/docs/main/config/annotations.md b/site/content/docs/main/config/annotations.md
index cfac0b5e67e..4d805f8f0f1 100644
--- a/site/content/docs/main/config/annotations.md
+++ b/site/content/docs/main/config/annotations.md
@@ -62,6 +62,7 @@ A [Kubernetes Service][9] maps to an [Envoy Cluster][10]. Envoy clusters have ma
 - `projectcontour.io/max-pending-requests`: [The maximum number of pending requests][13] that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
 - `projectcontour.io/max-requests`: [The maximum parallel requests][13] a single Envoy instance allows to the Kubernetes Service; defaults to 1024
 - `projectcontour.io/max-retries`: [The maximum number of parallel retries][14] a single Envoy instance allows to the Kubernetes Service; defaults to 3. This is independent of the per-Kubernetes Ingress number of retries (`projectcontour.io/num-retries`) and retry-on (`projectcontour.io/retry-on`), which control whether retries are attempted and how many times a single request can retry.
+- `projectcontour.io/per-host-max-connections`: [The maximum number of connections][20] that a single Envoy instance allows to an individual Kubernetes Service endpoint; no default (unlimited).
 - `projectcontour.io/upstream-protocol.{protocol}` : The protocol used to proxy requests to the upstream service. The annotation value contains a comma-separated list of port names and/or numbers that must match with the ones defined in the `Service` definition. This value can also be specified in the `spec.routes.services[].protocol` field on the HTTPProxy object, where it takes precedence over the Service annotation.
@@ -93,4 +94,5 @@ A [Kubernetes Service][9] maps to an [Envoy Cluster][10]. Envoy clusters have ma
 [16]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-virtualhost-require-tls
 [17]: api/#projectcontour.io/v1.UpstreamValidation
 [18]: ../config/tls-delegation/
-[19]: https://github.com/projectcontour/contour/issues/3544
\ No newline at end of file
+[19]: https://github.com/projectcontour/contour/issues/3544
+[20]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-per-host-thresholds
\ No newline at end of file
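
Reviewer note (not part of the patch): a minimal usage sketch of the new annotation. The Service below is hypothetical; its name, namespace, selector and ports are illustrative, and the annotation values simply mirror those exercised by the updated tests.

```yaml
# Hypothetical Service manifest showing projectcontour.io/per-host-max-connections
# alongside the existing circuit-breaker annotations.
apiVersion: v1
kind: Service
metadata:
  name: kuard
  namespace: default
  annotations:
    projectcontour.io/max-connections: "9000"
    projectcontour.io/max-pending-requests: "4096"
    projectcontour.io/max-requests: "404"
    projectcontour.io/max-retries: "7"
    # New in this change: per-host circuit breaker max-connections limit.
    projectcontour.io/per-host-max-connections: "45"
spec:
  selector:
    app: kuard
  ports:
  - port: 80
    targetPort: 8080
```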
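
Assuming the behaviour encoded in the updated cluster tests, the Envoy cluster generated for that Service would carry a circuit-breaker section roughly like the sketch below. Field names follow the `envoy.config.cluster.v3.CircuitBreakers` proto; this is an expectation sketch, not output captured from the patch.

```yaml
# Expected shape of the cluster's circuit_breakers section for the example above.
circuit_breakers:
  thresholds:
  - max_connections: 9000
    max_pending_requests: 4096
    max_requests: 404
    max_retries: 7
  per_host_thresholds:
  - max_connections: 45
```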