internal/dag: Add support for per-host max connections
Support a per-host max-connections circuit breaker threshold, enabling Envoy to
enforce a maximum number of connections to each individual Kubernetes Service
endpoint via a new service-level annotation: `projectcontour.io/per-host-max-connections`.

Resolves #6015

Signed-off-by: Aurel Canciu <aurelcanciu@gmail.com>
relu committed Dec 14, 2023
1 parent 4d48db7 commit cf85ee0
Showing 9 changed files with 91 additions and 27 deletions.
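
For context, a Service opting into the new per-host limit carries the annotation alongside the existing circuit breaker annotations. A minimal sketch (the `kuard` name, namespace, and threshold values mirror the test fixtures in this commit; the selector and ports are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: kuard
  namespace: default
  annotations:
    # Existing threshold: total connections a single Envoy allows to the whole cluster.
    projectcontour.io/max-connections: "9000"
    # New threshold: connections a single Envoy allows to each individual endpoint (host).
    projectcontour.io/per-host-max-connections: "45"
spec:
  selector:
    app: kuard
  ports:
    - name: http
      port: 80
      targetPort: 8080
```

The annotation can also be added to an existing Service with `kubectl annotate service kuard projectcontour.io/per-host-max-connections=45`.
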
24 changes: 17 additions & 7 deletions internal/annotation/annotations.go
@@ -61,13 +61,14 @@ var annotationsByKind = map[string]map[string]struct{}{
"projectcontour.io/websocket-routes": {},
},
"Service": {
"projectcontour.io/max-connections": {},
"projectcontour.io/max-pending-requests": {},
"projectcontour.io/max-requests": {},
"projectcontour.io/max-retries": {},
"projectcontour.io/upstream-protocol.h2": {},
"projectcontour.io/upstream-protocol.h2c": {},
"projectcontour.io/upstream-protocol.tls": {},
"projectcontour.io/max-connections": {},
"projectcontour.io/max-pending-requests": {},
"projectcontour.io/max-requests": {},
"projectcontour.io/max-retries": {},
"projectcontour.io/per-host-max-connections": {},
"projectcontour.io/upstream-protocol.h2": {},
"projectcontour.io/upstream-protocol.h2c": {},
"projectcontour.io/upstream-protocol.tls": {},
},
"HTTPProxy": {
"kubernetes.io/ingress.class": {},
@@ -256,3 +257,12 @@ func MaxRequests(o metav1.Object) uint32 {
func MaxRetries(o metav1.Object) uint32 {
return parseUInt32(ContourAnnotation(o, "max-retries"))
}

// PerHostMaxConnections returns the value of the first matching
// per-host-max-connections annotation for the following annotations:
// 1. projectcontour.io/per-host-max-connections
//
// '0' is returned if the annotation is absent or unparsable.
func PerHostMaxConnections(o metav1.Object) uint32 {
return parseUInt32(ContourAnnotation(o, "per-host-max-connections"))
}
13 changes: 7 additions & 6 deletions internal/dag/accessors.go
@@ -62,12 +62,13 @@ func (d *DAG) EnsureService(meta types.NamespacedName, port int, healthPort int,
HealthPort: healthSvcPort,
Weight: 1,
},
Protocol: upstreamProtocol(svc, svcPort),
MaxConnections: annotation.MaxConnections(svc),
MaxPendingRequests: annotation.MaxPendingRequests(svc),
MaxRequests: annotation.MaxRequests(svc),
MaxRetries: annotation.MaxRetries(svc),
ExternalName: externalName(svc),
Protocol: upstreamProtocol(svc, svcPort),
MaxConnections: annotation.MaxConnections(svc),
MaxPendingRequests: annotation.MaxPendingRequests(svc),
MaxRequests: annotation.MaxRequests(svc),
MaxRetries: annotation.MaxRetries(svc),
PerHostMaxConnections: annotation.PerHostMaxConnections(svc),
ExternalName: externalName(svc),
}, nil
}

18 changes: 10 additions & 8 deletions internal/dag/builder_test.go
@@ -7159,10 +7159,11 @@ func TestDAGInsert(t *testing.T) {
Name: "kuard",
Namespace: "default",
Annotations: map[string]string{
"projectcontour.io/max-connections": "9000",
"projectcontour.io/max-pending-requests": "4096",
"projectcontour.io/max-requests": "404",
"projectcontour.io/max-retries": "7",
"projectcontour.io/max-connections": "9000",
"projectcontour.io/max-pending-requests": "4096",
"projectcontour.io/max-requests": "404",
"projectcontour.io/max-retries": "7",
"projectcontour.io/per-host-max-connections": "45",
},
},
Spec: v1.ServiceSpec{
@@ -10895,10 +10896,11 @@ func TestDAGInsert(t *testing.T) {
ServicePort: s1b.Spec.Ports[0],
HealthPort: s1b.Spec.Ports[0],
},
MaxConnections: 9000,
MaxPendingRequests: 4096,
MaxRequests: 404,
MaxRetries: 7,
MaxConnections: 9000,
MaxPendingRequests: 4096,
MaxRequests: 404,
MaxRetries: 7,
PerHostMaxConnections: 45,
}),
),
),
4 changes: 4 additions & 0 deletions internal/dag/dag.go
@@ -966,6 +966,10 @@ type Service struct {
// Envoy will allow to the upstream cluster.
MaxRetries uint32

// PerHostMaxConnections is the maximum number of connections
// that Envoy will allow to each individual host in a cluster.
PerHostMaxConnections uint32

// ExternalName is an optional field referencing a dns entry for Service type "ExternalName"
ExternalName string
}
5 changes: 4 additions & 1 deletion internal/envoy/v3/cluster.go
@@ -78,14 +78,17 @@ func Cluster(c *dag.Cluster) *envoy_cluster_v3.Cluster {
cluster.IgnoreHealthOnHostRemoval = true
}

if envoy.AnyPositive(service.MaxConnections, service.MaxPendingRequests, service.MaxRequests, service.MaxRetries) {
if envoy.AnyPositive(service.MaxConnections, service.MaxPendingRequests, service.MaxRequests, service.MaxRetries, service.PerHostMaxConnections) {
cluster.CircuitBreakers = &envoy_cluster_v3.CircuitBreakers{
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: protobuf.UInt32OrNil(service.MaxConnections),
MaxPendingRequests: protobuf.UInt32OrNil(service.MaxPendingRequests),
MaxRequests: protobuf.UInt32OrNil(service.MaxRequests),
MaxRetries: protobuf.UInt32OrNil(service.MaxRetries),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: protobuf.UInt32OrNil(service.PerHostMaxConnections),
}},
}
}

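
With the per-host field plumbed through above, the rendered Envoy cluster gains a `per_host_thresholds` entry next to the existing per-cluster `thresholds`. A sketch of the expected circuit breaker portion of the xDS output (values match the annotation example earlier; the rest of the cluster config is omitted):

```yaml
circuit_breakers:
  thresholds:
    - max_connections: 9000   # from projectcontour.io/max-connections
  per_host_thresholds:
    - max_connections: 45     # from projectcontour.io/per-host-max-connections
```

Note that circuit breaker thresholds are enforced by each Envoy instance independently, so the effective cap on an endpoint is roughly this value multiplied by the number of Envoy replicas.
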
33 changes: 33 additions & 0 deletions internal/envoy/v3/cluster_test.go
@@ -347,6 +347,7 @@ func TestCluster(t *testing.T) {
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: wrapperspb.UInt32(9000),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
},
},
},
@@ -375,6 +376,7 @@
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxPendingRequests: wrapperspb.UInt32(4096),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
},
},
},
@@ -403,6 +405,7 @@
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxRequests: wrapperspb.UInt32(404),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
},
},
},
@@ -431,6 +434,36 @@
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxRetries: wrapperspb.UInt32(7),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
},
},
},
"projectcontour.io/per-host-max-connections": {
cluster: &dag.Cluster{
Upstream: &dag.Service{
PerHostMaxConnections: 45,
Weighted: dag.WeightedService{
Weight: 1,
ServiceName: s1.Name,
ServiceNamespace: s1.Namespace,
ServicePort: s1.Spec.Ports[0],
HealthPort: s1.Spec.Ports[0],
},
},
},
want: &envoy_cluster_v3.Cluster{
Name: "default/kuard/443/da39a3ee5e",
AltStatName: "default_kuard_443",
ClusterDiscoveryType: ClusterDiscoveryType(envoy_cluster_v3.Cluster_EDS),
EdsClusterConfig: &envoy_cluster_v3.Cluster_EdsClusterConfig{
EdsConfig: ConfigSource("contour"),
ServiceName: "default/kuard/http",
},
CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: wrapperspb.UInt32(45),
}},
},
},
},
5 changes: 5 additions & 0 deletions internal/featuretests/v3/cluster_test.go
@@ -392,6 +392,7 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
Annotate("projectcontour.io/max-pending-requests", "4096").
Annotate("projectcontour.io/max-requests", "404").
Annotate("projectcontour.io/max-retries", "7").
Annotate("projectcontour.io/per-host-max-connections", "45").
WithPorts(v1.ServicePort{Port: 8080, TargetPort: intstr.FromString("8080")})

i1 := &networking_v1.Ingress{
@@ -425,6 +426,9 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
MaxRequests: wrapperspb.UInt32(404),
MaxRetries: wrapperspb.UInt32(7),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: wrapperspb.UInt32(45),
}},
},
}),
),
@@ -455,6 +459,7 @@ func TestClusterCircuitbreakerAnnotations(t *testing.T) {
Thresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxPendingRequests: wrapperspb.UInt32(9999),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{}},
},
}),
),
12 changes: 8 additions & 4 deletions internal/xdscache/v3/cluster_test.go
@@ -743,10 +743,11 @@ func TestClusterVisit(t *testing.T) {
"default",
"kuard",
map[string]string{
"projectcontour.io/max-connections": "9000",
"projectcontour.io/max-pending-requests": "4096",
"projectcontour.io/max-requests": "404",
"projectcontour.io/max-retries": "7",
"projectcontour.io/max-connections": "9000",
"projectcontour.io/max-pending-requests": "4096",
"projectcontour.io/max-requests": "404",
"projectcontour.io/max-retries": "7",
"projectcontour.io/per-host-max-connections": "45",
},
v1.ServicePort{
Protocol: "TCP",
@@ -771,6 +772,9 @@
MaxRequests: wrapperspb.UInt32(404),
MaxRetries: wrapperspb.UInt32(7),
}},
PerHostThresholds: []*envoy_cluster_v3.CircuitBreakers_Thresholds{{
MaxConnections: wrapperspb.UInt32(45),
}},
},
},
),
4 changes: 3 additions & 1 deletion site/content/docs/main/config/annotations.md
@@ -62,6 +62,7 @@ A [Kubernetes Service][9] maps to an [Envoy Cluster][10]. Envoy clusters have ma
- `projectcontour.io/max-pending-requests`: [The maximum number of pending requests][13] that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
- `projectcontour.io/max-requests`: [The maximum parallel requests][13] a single Envoy instance allows to the Kubernetes Service; defaults to 1024
- `projectcontour.io/max-retries`: [The maximum number of parallel retries][14] a single Envoy instance allows to the Kubernetes Service; defaults to 3. This is independent of the per-Kubernetes Ingress number of retries (`projectcontour.io/num-retries`) and retry-on (`projectcontour.io/retry-on`), which control whether retries are attempted and how many times a single request can retry.
- `projectcontour.io/per-host-max-connections`: [The maximum number of connections][20] that a single Envoy instance allows to an individual Kubernetes Service endpoint; no default (unlimited).
- `projectcontour.io/upstream-protocol.{protocol}` : The protocol used to proxy requests to the upstream service.
The annotation value contains a comma-separated list of port names and/or numbers that must match with the ones defined in the `Service` definition.
This value can also be specified in the `spec.routes.services[].protocol` field on the HTTPProxy object, where it takes precedence over the Service annotation.
@@ -93,4 +94,5 @@ A [Kubernetes Service][9] maps to an [Envoy Cluster][10]. Envoy clusters have ma
[16]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-virtualhost-require-tls
[17]: api/#projectcontour.io/v1.UpstreamValidation
[18]: ../config/tls-delegation/
[19]: https://github.com/projectcontour/contour/issues/3544
[19]: https://github.com/projectcontour/contour/issues/3544
[20]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-per-host-thresholds
