diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 0886364fb0b..0d64f74c423 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -35,9 +35,9 @@ updates:
patterns:
- "actions/upload-artifact"
- "actions/download-artifact"
-
+
# release branch N targets
-- target-branch: release-1.28
+- target-branch: release-1.29
package-ecosystem: "gomod"
directory: "/"
schedule:
@@ -57,7 +57,7 @@ updates:
k8s-dependencies:
patterns:
- "k8s.io/*"
-- target-branch: release-1.28
+- target-branch: release-1.29
package-ecosystem: "github-actions"
directory: "/"
schedule:
@@ -80,7 +80,7 @@ updates:
- "actions/download-artifact"
# release branch N-1 targets
-- target-branch: release-1.27
+- target-branch: release-1.28
package-ecosystem: "gomod"
directory: "/"
schedule:
@@ -100,7 +100,7 @@ updates:
k8s-dependencies:
patterns:
- "k8s.io/*"
-- target-branch: release-1.27
+- target-branch: release-1.28
package-ecosystem: "github-actions"
directory: "/"
schedule:
@@ -123,7 +123,7 @@ updates:
- "actions/download-artifact"
# release branch N-2 targets
-- target-branch: release-1.26
+- target-branch: release-1.27
package-ecosystem: "gomod"
directory: "/"
schedule:
@@ -143,7 +143,7 @@ updates:
k8s-dependencies:
patterns:
- "k8s.io/*"
-- target-branch: release-1.26
+- target-branch: release-1.27
package-ecosystem: "github-actions"
directory: "/"
schedule:
diff --git a/.github/workflows/trivy-scan.yaml b/.github/workflows/trivy-scan.yaml
index 78136a3cad5..0cfb6170762 100644
--- a/.github/workflows/trivy-scan.yaml
+++ b/.github/workflows/trivy-scan.yaml
@@ -16,9 +16,9 @@ jobs:
matrix:
branch:
- main
+ - release-1.29
- release-1.28
- release-1.27
- - release-1.26
runs-on: ubuntu-latest
permissions:
security-events: write
diff --git a/changelogs/CHANGELOG-v1.29.0.md b/changelogs/CHANGELOG-v1.29.0.md
new file mode 100644
index 00000000000..09e9394a5b2
--- /dev/null
+++ b/changelogs/CHANGELOG-v1.29.0.md
@@ -0,0 +1,144 @@
+We are delighted to present version v1.29.0 of Contour, our layer 7 HTTP reverse proxy for Kubernetes clusters.
+
+A big thank you to everyone who contributed to the release.
+
+
+- [Major Changes](#major-changes)
+- [Minor Changes](#minor-changes)
+- [Other Changes](#other-changes)
+- [Docs Changes](#docs-changes)
+- [Deprecations/Removals](#deprecation-and-removal-notices)
+- [Installing/Upgrading](#installing-and-upgrading)
+- [Compatible Kubernetes Versions](#compatible-kubernetes-versions)
+- [Community Thanks!](#community-thanks)
+
+# Major Changes
+
+## Default xDS Server Implementation is now Envoy
+
+As of this release, Contour now uses the `envoy` xDS server implementation by default.
+This xDS server implementation is based on Envoy's [go-control-plane project](https://github.com/envoyproxy/go-control-plane) and will eventually be the only supported xDS server implementation in Contour.
+This change is expected to be transparent to users.
+
+### I'm seeing issues after upgrading, how do I revert to the contour xDS server?
+
+If you encounter any issues, you can easily revert to the `contour` xDS server with the following configuration:
+
+(if using Contour config file)
+```yaml
+server:
+ xds-server-type: contour
+```
+
+(if using ContourConfiguration CRD)
+```yaml
+...
+spec:
+ xdsServer:
+ type: contour
+```
+
+You will need to restart Contour for the changes to take effect.
+
+(#6146, @skriss)
+
+## Gateway API: Inform on v1 types
+
+Contour no longer informs on v1beta1 resources that have graduated to v1.
+This includes the "core" resources GatewayClass, Gateway, and HTTPRoute.
+This means that users should ensure they have updated CRDs to Gateway API v1.0.0 or newer, which introduced the v1 version with compatibility with v1beta1.
+
+(#6153, @sunjayBhatia)
+
+
+# Minor Changes
+
+## Use EndpointSlices by default
+
+Contour now uses the Kubernetes EndpointSlices API by default to determine the endpoints to configure Envoy, instead of the Endpoints API.
+Note: if you need to continue using the Endpoints API, you can disable the feature flag via `featureFlags: ["useEndpointSlices=false"]` in the Contour config file or ContourConfiguration CRD.
+
+(#6149, @izturn)
+
+## Gateway API: handle Route conflicts with HTTPRoute.Matches
+
+It's possible that multiple HTTPRoutes will define the same Match conditions. In this case the following logic is applied to resolve the conflict:
+
+- The oldest Route based on creation timestamp. For example, a Route with a creation timestamp of “2020-09-08 01:02:03” is given precedence over a Route with a creation timestamp of “2020-09-08 01:02:04”.
+- The Route appearing first in alphabetical order (namespace/name) for example, foo/bar is given precedence over foo/baz.
+
+With the above ordering, any HTTPRoute that ranks lower will be marked with the below conditions accordingly:
+1. If only partial rules under this HTTPRoute are conflicted, it's marked with `Accepted: True` and `PartiallyInvalid: true` Conditions and Reason: `RuleMatchPartiallyConflict`.
+2. If all the rules under this HTTPRoute are conflicted, it's marked with `Accepted: False` Condition and Reason `RuleMatchConflict`.
+
+(#6188, @lubronzhan)
+
+## Spawn Upstream Span is now enabled in tracing
+
+As described in the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-tracing), ```spawn_upstream_span``` should be true when Envoy is working as an independent proxy, and from now on Contour tracing spans will show up as a parent span to upstream spans.
+
+(#6271, @SamMHD)
+
+
+# Other Changes
+- Fix data race in BackendTLSPolicy status update logic. (#6185, @sunjayBhatia)
+- Fix for specifying a health check port with an ExternalName Service. (#6230, @yangyy93)
+- Updates the example `envoyproxy/ratelimit` image tag to `19f2079f`, for multi-arch support and other improvements. (#6246, @skriss)
+- In the `envoy` go-control-plane xDS server, use a separate snapshot cache for Endpoints, to minimize the amount of unnecessary xDS traffic generated. (#6250, @skriss)
+- If there were no relevant resources for Contour in the watched namespaces during the startup of a follower instance of Contour, it did not reach a ready state. (#6295, @tsaarni)
+- Added support for enabling circuit breaker statistics tracking. (#6297, @rajatvig)
+- Updates to Go 1.22.2. See the [Go release notes](https://go.dev/doc/devel/release#go1.22.minor) for more information. (#6327, @skriss)
+- Gateway API: add support for HTTPRoute's Timeouts.BackendRequest field. (#6335, @skriss)
+- Updates Envoy to v1.30.1. See the v1.30.0 release notes [here](https://www.envoyproxy.io/docs/envoy/v1.30.1/version_history/v1.30/v1.30.0) and the v1.30.1 release notes [here](https://www.envoyproxy.io/docs/envoy/v1.30.1/version_history/v1.30/v1.30.1). (#6353, @tico88612)
+- Gateway API: a timeout value of `0s` disables the timeout. (#6375, @skriss)
+- Fix provisioner to use separate `--disable-feature` flags on Contour Deployment for each disabled feature. Previously a comma separated list was passed which was incorrect. (#6413, @sunjayBhatia)
+
+
+# Deprecation and Removal Notices
+
+## Configuring Contour with a GatewayClass controller name is no longer supported
+
+Contour can no longer be configured with a GatewayClass controller name (gateway.controllerName in the config file or ContourConfiguration CRD), as the config field has been removed.
+Instead, either use a specific Gateway reference (gateway.gatewayRef), or use the Gateway provisioner.
+
+(#6145, @skriss)
+
+## Contour xDS server implementation is now deprecated
+
+As of this release, the `contour` xDS server implementation is now deprecated.
+Once the go-control-plane based `envoy` xDS server has had sufficient production bake time, the `contour` implementation will be removed from Contour.
+Notification of removal will occur at least one release in advance.
+
+(#6146, @skriss)
+
+## Use of Endpoints API is deprecated
+
+Contour now uses the EndpointSlices API by default, and its usage of the Endpoints API is deprecated as of this release. Support for Endpoints, and the associated `useEndpointSlices` feature flag, will be removed in a future release.
+
+(#6149, @izturn)
+
+
+# Installing and Upgrading
+
+For a fresh install of Contour, consult the [getting started documentation](https://projectcontour.io/getting-started/).
+
+To upgrade an existing Contour installation, please consult the [upgrade documentation](https://projectcontour.io/resources/upgrading/).
+
+
+# Compatible Kubernetes Versions
+
+Contour v1.29.0 is tested against Kubernetes 1.27 through 1.29.
+
+# Community Thanks!
+We’re immensely grateful for all the community contributions that help make Contour even better! For this release, special thanks go out to the following contributors:
+
+- @SamMHD
+- @izturn
+- @lubronzhan
+- @rajatvig
+- @tico88612
+- @yangyy93
+
+
+# Are you a Contour user? We would love to know!
+If you're using Contour and want to add your organization to our adopters list, please visit this [page](https://projectcontour.io/resources/adopters/). If you prefer to keep your organization name anonymous but still give us feedback into your usage and scenarios for Contour, please post on this [GitHub thread](https://github.com/projectcontour/contour/issues/1269).
diff --git a/changelogs/unreleased/6145-skriss-deprecation.md b/changelogs/unreleased/6145-skriss-deprecation.md
deleted file mode 100644
index 168e7666284..00000000000
--- a/changelogs/unreleased/6145-skriss-deprecation.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## Configuring Contour with a GatewayClass controller name is no longer supported
-
-Contour can no longer be configured with a GatewayClass controller name (gateway.controllerName in the config file or ContourConfiguration CRD), as the config field has been removed.
-Instead, either use a specific Gateway reference (gateway.gatewayRef), or use the Gateway provisioner.
diff --git a/changelogs/unreleased/6146-skriss-deprecation.md b/changelogs/unreleased/6146-skriss-deprecation.md
deleted file mode 100644
index 3390cab7e85..00000000000
--- a/changelogs/unreleased/6146-skriss-deprecation.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Contour xDS server implementation is now deprecated
-
-As of this release, the `contour` xDS server implementation is now deprecated.
-Once the go-control-plane based `envoy` xDS server has had sufficient production bake time, the `contour` implementation will be removed from Contour.
-Notification of removal will occur at least one release in advance.
diff --git a/changelogs/unreleased/6146-skriss-major.md b/changelogs/unreleased/6146-skriss-major.md
deleted file mode 100644
index 5340d272fc7..00000000000
--- a/changelogs/unreleased/6146-skriss-major.md
+++ /dev/null
@@ -1,25 +0,0 @@
-## Default xDS Server Implementation is now Envoy
-
-As of this release, Contour now uses the `envoy` xDS server implementation by default.
-This xDS server implementation is based on Envoy's [go-control-plane project](https://github.com/envoyproxy/go-control-plane) and will eventually be the only supported xDS server implementation in Contour.
-This change is expected to be transparent to users.
-
-### I'm seeing issues after upgrading, how to I revert to the contour xDS server?
-
-If you encounter any issues, you can easily revert to the `contour` xDS server with the following configuration:
-
-(if using Contour config file)
-```yaml
-server:
- xds-server-type: contour
-```
-
-(if using ContourConfiguration CRD
-```yaml
-...
-spec:
- xdsServer:
- type: contour
-```
-
-You will need to restart Contour for the changes to take effect.
diff --git a/changelogs/unreleased/6149-izturn-deprecation.md b/changelogs/unreleased/6149-izturn-deprecation.md
deleted file mode 100644
index 2566d916f15..00000000000
--- a/changelogs/unreleased/6149-izturn-deprecation.md
+++ /dev/null
@@ -1,3 +0,0 @@
-## Use of Endpoints API is deprecated
-
-Contour now uses the EndpointSlices API by default, and its usage of the Endpoints API is deprecated as of this release. Support for Endpoints, and the associated `useEndpointSlices` feature flag, will be removed in a future release.
\ No newline at end of file
diff --git a/changelogs/unreleased/6149-izturn-minor.md b/changelogs/unreleased/6149-izturn-minor.md
deleted file mode 100644
index 9fa8122d473..00000000000
--- a/changelogs/unreleased/6149-izturn-minor.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-## Use EndpointSlices by default
-
-Contour now uses the Kubernetes EndpointSlices API by default to determine the endpoints to configure Envoy, instead of the Endpoints API.
-Note: if you need to continue using the Endpoints API, you can disable the feature flag via `featureFlags: ["useEndpointSlices=false"]` in the Contour config file or ContourConfiguration CRD.
diff --git a/changelogs/unreleased/6153-sunjayBhatia-major.md b/changelogs/unreleased/6153-sunjayBhatia-major.md
deleted file mode 100644
index c35c6e25bbe..00000000000
--- a/changelogs/unreleased/6153-sunjayBhatia-major.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Gateway API: Inform on v1 types
-
-Contour no longer informs on v1beta1 resources that have graduated to v1.
-This includes the "core" resources GatewayClass, Gateway, and HTTPRoute.
-This means that users should ensure they have updated CRDs to Gateway API v1.0.0 or newer, which introduced the v1 version with compatibility with v1beta1.
diff --git a/changelogs/unreleased/6185-sunjayBhatia-small.md b/changelogs/unreleased/6185-sunjayBhatia-small.md
deleted file mode 100644
index ef1bb0a8169..00000000000
--- a/changelogs/unreleased/6185-sunjayBhatia-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Fix data race in BackendTLSPolicy status update logic.
diff --git a/changelogs/unreleased/6188-lubronzhan-minor.md b/changelogs/unreleased/6188-lubronzhan-minor.md
deleted file mode 100644
index 73240450ad5..00000000000
--- a/changelogs/unreleased/6188-lubronzhan-minor.md
+++ /dev/null
@@ -1,10 +0,0 @@
-## Gateway API: handle Route conflicts with HTTPRoute.Matches
-
-It's possible that multiple HTTPRoutes will define the same Match conditions. In this case the following logic is applied to resolve the conflict:
-
-- The oldest Route based on creation timestamp. For example, a Route with a creation timestamp of “2020-09-08 01:02:03” is given precedence over a Route with a creation timestamp of “2020-09-08 01:02:04”.
-- The Route appearing first in alphabetical order (namespace/name) for example, foo/bar is given precedence over foo/baz.
-
-With above ordering, any HTTPRoute that ranks lower, will be marked with below conditions accordionly
-1. If only partial rules under this HTTPRoute are conflicted, it's marked with `Accepted: True` and `PartiallyInvalid: true` Conditions and Reason: `RuleMatchPartiallyConflict`.
-2. If all the rules under this HTTPRoute are conflicted, it's marked with `Accepted: False` Condition and Reason `RuleMatchConflict`.
diff --git a/changelogs/unreleased/6230-yangyy93-small.md b/changelogs/unreleased/6230-yangyy93-small.md
deleted file mode 100644
index bc07a9b327f..00000000000
--- a/changelogs/unreleased/6230-yangyy93-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Fix for specifying a health check port with an ExternalName Service.
diff --git a/changelogs/unreleased/6246-skriss-small.md b/changelogs/unreleased/6246-skriss-small.md
deleted file mode 100644
index be0bf9fa7d1..00000000000
--- a/changelogs/unreleased/6246-skriss-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Updates the example `envoyproxy/ratelimit` image tag to `19f2079f`, for multi-arch support and other improvements.
\ No newline at end of file
diff --git a/changelogs/unreleased/6250-skriss-small.md b/changelogs/unreleased/6250-skriss-small.md
deleted file mode 100644
index 9a93efe2d60..00000000000
--- a/changelogs/unreleased/6250-skriss-small.md
+++ /dev/null
@@ -1 +0,0 @@
-In the `envoy` go-control-plane xDS server, use a separate snapshot cache for Endpoints, to minimize the amount of unnecessary xDS traffic generated.
\ No newline at end of file
diff --git a/changelogs/unreleased/6271-SamMHD-minor.md b/changelogs/unreleased/6271-SamMHD-minor.md
deleted file mode 100644
index 43cb5cc26f4..00000000000
--- a/changelogs/unreleased/6271-SamMHD-minor.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## Spawn Upstream Span is now enabled in tracing
-
-As described in [Envoy documentations](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-tracing), ```spawn_upstream_span``` should be true when envoy is working as an independent proxy and from now on contour tracing spans will show up as a parent span to upstream spans.
-
diff --git a/changelogs/unreleased/6295-tsaarni-small.md b/changelogs/unreleased/6295-tsaarni-small.md
deleted file mode 100644
index 9747391898f..00000000000
--- a/changelogs/unreleased/6295-tsaarni-small.md
+++ /dev/null
@@ -1 +0,0 @@
-If there were no relevant resources for Contour in the watched namespaces during the startup of a follower instance of Contour, it did not reach a ready state.
diff --git a/changelogs/unreleased/6297-rajatvig-small.md b/changelogs/unreleased/6297-rajatvig-small.md
deleted file mode 100644
index b62c0e8b429..00000000000
--- a/changelogs/unreleased/6297-rajatvig-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Added support for enabling circuit breaker statistics tracking.
diff --git a/changelogs/unreleased/6327-skriss-small.md b/changelogs/unreleased/6327-skriss-small.md
deleted file mode 100644
index e91daa78e17..00000000000
--- a/changelogs/unreleased/6327-skriss-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Updates to Go 1.22.2. See the [Go release notes](https://go.dev/doc/devel/release#go1.22.minor) for more information.
diff --git a/changelogs/unreleased/6335-skriss-small.md b/changelogs/unreleased/6335-skriss-small.md
deleted file mode 100644
index 35dfdfe90d8..00000000000
--- a/changelogs/unreleased/6335-skriss-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Gateway API: add support for HTTPRoute's Timeouts.BackendRequest field.
\ No newline at end of file
diff --git a/changelogs/unreleased/6353-tico88612-small.md b/changelogs/unreleased/6353-tico88612-small.md
deleted file mode 100644
index 533d34e8979..00000000000
--- a/changelogs/unreleased/6353-tico88612-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Updates Envoy to v1.30.1. See the v1.30.0 release notes [here](https://www.envoyproxy.io/docs/envoy/v1.30.1/version_history/v1.30/v1.30.0) and the v1.30.1 release notes [here](https://www.envoyproxy.io/docs/envoy/v1.30.1/version_history/v1.30/v1.30.1).
diff --git a/changelogs/unreleased/6375-skriss-small.md b/changelogs/unreleased/6375-skriss-small.md
deleted file mode 100644
index 6fdfd27f054..00000000000
--- a/changelogs/unreleased/6375-skriss-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Gateway API: a timeout value of `0s` disables the timeout.
\ No newline at end of file
diff --git a/changelogs/unreleased/6413-sunjayBhatia-small.md b/changelogs/unreleased/6413-sunjayBhatia-small.md
deleted file mode 100644
index 192aa97295b..00000000000
--- a/changelogs/unreleased/6413-sunjayBhatia-small.md
+++ /dev/null
@@ -1 +0,0 @@
-Fix provisioner to use separate `--disable-feature` flags on Contour Deployment for each disabled feature. Previously a comma separated list was passed which was incorrect.
diff --git a/site/config.yaml b/site/config.yaml
index 53a7502e048..7abc262ada1 100644
--- a/site/config.yaml
+++ b/site/config.yaml
@@ -29,7 +29,7 @@ params:
github_url: "https://github.com/projectcontour/contour"
github_raw_url: "https://raw.githubusercontent.com/projectcontour/contour"
slack_url: "https://kubernetes.slack.com/messages/contour"
- latest_version: "1.28"
+ latest_version: "1.29"
use_advanced_docs: true
docs_right_sidebar: true
docs_search: true
@@ -39,6 +39,7 @@ params:
docs_versioning: true
docs_versions:
- main
+ - "1.29"
- "1.28"
- "1.27"
- "1.26"
diff --git a/site/content/docs/1.29/_index.md b/site/content/docs/1.29/_index.md
new file mode 100644
index 00000000000..7e76e77d6df
--- /dev/null
+++ b/site/content/docs/1.29/_index.md
@@ -0,0 +1,48 @@
+---
+cascade:
+ layout: docs
+ version: "1.29"
+ branch: release-1.29
+---
+
+## Overview
+Contour is an Ingress controller for Kubernetes that works by deploying the [Envoy proxy][1] as a reverse proxy and load balancer.
+Contour supports dynamic configuration updates out of the box while maintaining a lightweight profile.
+
+## Philosophy
+- Follow an opinionated approach which allows us to better serve most users
+- Design Contour to serve both the cluster administrator and the application developer
+- Use our experience with ingress to define reasonable defaults for both cluster administrators and application developers.
+- Meet users where they are by understanding and adapting Contour to their use cases
+
+See the full [Contour Philosophy][8] page.
+
+## Why Contour?
+Contour bridges other solution gaps in several ways:
+- Dynamically update the ingress configuration with minimal dropped connections
+- Safely support multiple types of ingress config in multi-team Kubernetes clusters
+ - [Ingress/v1][10]
+ - [HTTPProxy (Contour custom resource)][2]
+ - [Gateway API][9]
+- Cleanly integrate with the Kubernetes object model
+
+## Prerequisites
+Contour is tested with Kubernetes clusters running version [1.21 and later][4].
+
+## Get started
+Getting started with Contour is as simple as one command.
+See the [Getting Started][3] document.
+
+## Troubleshooting
+If you encounter issues review the [troubleshooting][5] page, [file an issue][6], or talk to us on the [#contour channel][7] on Kubernetes slack.
+
+[1]: https://www.envoyproxy.io/
+[2]: config/fundamentals.md
+[3]: /getting-started
+[4]: /resources/compatibility-matrix.md
+[5]: /docs/main/troubleshooting
+[6]: https://github.com/projectcontour/contour/issues
+[7]: https://kubernetes.slack.com/messages/contour
+[8]: /resources/philosophy
+[9]: guides/gateway-api
+[10]: /docs/{{< param version >}}/config/ingress
diff --git a/site/content/docs/1.29/architecture.md b/site/content/docs/1.29/architecture.md
new file mode 100644
index 00000000000..b29cb409d39
--- /dev/null
+++ b/site/content/docs/1.29/architecture.md
@@ -0,0 +1,74 @@
+# Contour Architecture
+
+The Contour Ingress controller is a collaboration between:
+
+* Envoy, which provides the high performance reverse proxy.
+* Contour, which acts as a management server for Envoy and provides it with configuration.
+
+These containers are deployed separately, Contour as a Deployment and Envoy as a Kubernetes Daemonset or Deployment, although other configurations are possible.
+
+In the Envoy Pods, Contour runs as an initcontainer in `bootstrap` mode and writes an Envoy bootstrap configuration to a temporary volume.
+This volume is passed to the Envoy container and directs Envoy to treat Contour as its [management server][1].
+
+After initialization is complete, the Envoy container starts, retrieves the bootstrap configuration written by Contour's `bootstrap` mode, and establishes a GRPC session with Contour to receive configuration.
+
+Envoy will gracefully retry if the management server is unavailable, which removes any container startup ordering issues.
+
+Contour is a client of the Kubernetes API.
+Contour watches Ingress, HTTPProxy, Gateway API, Secret, Service, and Endpoint objects, and acts as the management server for its Envoy sibling by translating its cache of objects into the relevant JSON stanzas: Service objects for CDS, Ingress for RDS, Endpoint objects for EDS, and so on).
+
+The transfer of information from Kubernetes to Contour is by watching the Kubernetes API utilizing [controller-runtime][4] primitives.
+
+Kubernetes readiness probes are configured to check whether Envoy is ready to accept connections.
+The Envoy readiness probe sends GET requests to `/ready` in Envoy's administration endpoint.
+
+For Contour, a liveness probe checks the `/healthz` running on the Pod's metrics port.
+Readiness probe is a check that Contour can access the Kubernetes API.
+
+## Architectural Overview
+Below are a couple of high level architectural diagrams of how Contour works inside a Kubernetes cluster as well as showing the data path of a request to a backend pod.
+
+A request to `projectcontour.io/blog` gets routed via a load balancer to an instance of an Envoy proxy which then sends the request to a pod.
+
+![architectural overview][2]
+
+Following is a diagram of how Contour and Envoy are deployed in a Kubernetes cluster.
+
+### Kubernetes API Server
+
+The following API objects are watched:
+- Services
+- Endpoints
+- Secrets
+- Ingress
+- HTTPProxy
+- Gateway API (Optional)
+
+### Contour Deployment
+
+Contour is deployed in the cluster using a Kubernetes Deployment.
+It has built-in leader election which is responsible for updating httproxy/ingress/gateway api resources via Kube API server.
+All instances are able to serve xDS configuration to any Envoy instance, but only the leader can write status back to the API server.
+
+The data being served from Contour instances is eventually consistent in an HA-based deployment.
+However, HA mode is operationally scalable when you have a high request rate from Envoy to Contour, as requests are load-balanced among Contour instances.
+This also helps during availability zone/data center degradation events, as your service continues to function.
+
+### Envoy Deployment
+
+Envoy can be deployed in two different models, as a Kubernetes Daemonset or as a Kubernetes Deployment.
+
+Daemonset is the standard deployment model where a single instance of Envoy is deployed per Kubernetes Node.
+This allows for simple Envoy pod distribution across the cluster as well as being able to expose Envoy using `hostPorts` to improve network performance.
+One potential downside of this deployment model is when a node is removed from the cluster (e.g. on a cluster scale down, etc) then the configured `preStop` hooks are not available so connections can be dropped.
+This is a limitation that applies to any Daemonset in Kubernetes.
+
+An alternative Envoy deployment model is utilizing a Kubernetes Deployment with a configured `podAntiAffinity` which attempts to mirror the Daemonset deployment model.
+A benefit of this model compared to the Daemonset version is when a node is removed from the cluster, the proper shutdown events are available so connections can be cleanly drained from Envoy before terminating.
+
+![architectural overview 2][3]
+
+[1]: https://www.envoyproxy.io/docs/envoy/v1.13.0/api-docs/xds_protocol
+[2]: ../img/archoverview.png
+[3]: ../img/contour_deployment_in_k8s.png
+[4]: https://github.com/kubernetes-sigs/controller-runtime
diff --git a/site/content/docs/1.29/config/access-logging.md b/site/content/docs/1.29/config/access-logging.md
new file mode 100644
index 00000000000..0c5b6e1583c
--- /dev/null
+++ b/site/content/docs/1.29/config/access-logging.md
@@ -0,0 +1,148 @@
+# Access Logging
+
+## Overview
+
+Contour allows you to control Envoy's access logging.
+By default, HTTP and HTTPS access logs are written to `/dev/stdout` by the Envoy containers and look like following:
+
+```
+[2021-04-14T16:36:00.361Z] "GET /foo HTTP/1.1" 200 - 0 463 6 3 "-" "HTTPie/1.0.3" "837aa8dc-344f-4faa-b7d5-c9cce1028519" "localhost:8080" "127.0.0.1:8081"
+```
+
+The detailed description of each field can be found in [Envoy access logging documentation][7].
+
+
+## Customizing Access Log Destination
+
+You can change the destination file where the access log is written by using Contour [command line parameters][1] `--envoy-http-access-log` and `--envoy-https-access-log`.
+
+## Customizing Access Log Format
+
+The access log can take two different formats, both can be customized
+
+* Text based access logs, like shown in the example above.
+* Structured JSON logging.
+
+### Text Based Access Logging
+
+Ensure that you have selected `envoy` as the access log format.
+Note that this is the default format if the parameters are not given.
+
+- Add `--accesslog-format=envoy` to your Contour startup line, or
+- Add `accesslog-format: envoy` to your configuration file.
+
+Customize the access log format by defining `accesslog-format-string` in your configuration file.
+
+```yaml
+accesslog-format-string: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"
+```
+After restarting Contour and successful validation of the configuration, the new format will take effect in a short while.
+
+Refer to [Envoy access logging documentation][7] for the description of the command operators, and note that the format string needs to end in a linefeed `\n`.
+
+### Structured JSON Logging
+
+Contour allows you to choose from a set of JSON fields that will be expanded into Envoy templates and sent to Envoy.
+There is a default set of fields if you enable JSON logging, and you may customize which fields you log.
+
+The list of available fields are discoverable in the following objects:
+- [jsonFields][2] are fields that have built in mappings to commonly used envoy operators.
+- [envoySimpleOperators][3] are the names of simple envoy operators that don't require arguments, they are case-insensitive when configured.
+- [envoyComplexOperators][4] are the names of complex envoy operators that require arguments.
+
+The default list of fields is available at [DefaultAccessLogJSONFields][5].
+
+#### Enabling the Feature
+
+To enable the feature you have two options:
+
+- Add `--accesslog-format=json` to your Contour startup line.
+- Add `accesslog-format: json` to your configuration file.
+
+Without any further customization, the [default fields][5] will be used.
+
+#### Customizing Logged Fields
+
+To customize the logged fields, add a `json-fields` list of strings to your configuration file.
+If the `json-fields` key is not specified, the [default fields][5] will be configured.
+
+To use a value from [jsonFields][2] or [envoySimpleOperators][3], simply include the name of the value in the list of strings.
+The jsonFields are case-sensitive, but envoySimpleOperators are not.
+
+To use [envoyComplexOperators][4] or to use alternative field names, specify strings as key/value pairs like `"fieldName=%OPERATOR(...)%"`.
+
+Unknown field names in non key/value fields will result in validation errors, as will unknown Envoy operators in key/value fields.
+Note that the `DYNAMIC_METADATA` and `FILTER_STATE` Envoy logging operators are not supported at this time due to the complexity of their validation.
+
+See the [example config file][6] to see this used in context.
+
+#### Omitting Logs with Empty Values
+
+Contour automatically omits empty fields in Envoy JSON access logs, enhancing clarity and delivering more concise and relevant log outputs by default.
+
+#### Sample Configuration File
+
+Here is a sample config:
+
+```yaml
+accesslog-format: json
+json-fields:
+ - "@timestamp"
+ - "authority"
+ - "bytes_received"
+ - "bytes_sent"
+ - "customer_id=%REQ(X-CUSTOMER-ID)%"
+ - "downstream_local_address"
+ - "downstream_remote_address"
+ - "duration"
+ - "method"
+ - "path"
+ - "protocol"
+ - "request_id"
+ - "requested_server_name"
+ - "response_code"
+ - "response_flags"
+ - "uber_trace_id"
+ - "upstream_cluster"
+ - "upstream_host"
+ - "upstream_local_address"
+ - "upstream_service_time"
+ - "user_agent"
+ - "x_forwarded_for"
+```
+
+### Logging the route source
+
+Contour can log the kind, namespace and name of the Kubernetes resource that generated the route for a given access log entry.
+
+For text-based access logging, the following command operators can be used:
+- `%METADATA(ROUTE:envoy.access_loggers.file:io.projectcontour.kind)%`
+- `%METADATA(ROUTE:envoy.access_loggers.file:io.projectcontour.namespace)%`
+- `%METADATA(ROUTE:envoy.access_loggers.file:io.projectcontour.name)%`
+
+For JSON access logging, the following fields can be added (these are Contour-specific aliases to the above command operators):
+- `contour_config_kind`
+- `contour_config_namespace`
+- `contour_config_name`
+
+## Using Access Log Formatter Extensions
+
+Envoy allows implementing custom access log command operators as extensions.
+Following extensions are supported by Contour:
+
+| Command operator | Description |
+|------------------|-------------|
+| [REQ_WITHOUT_QUERY][8] | Works the same way as REQ except that it will remove the query string. It is used to avoid logging any sensitive information into the access log. |
+| [METADATA][9] | Prints all types of metadata. |
+
+
+
+[1]: ../configuration#serve-flags
+[2]: https://github.com/search?q=%22var+jsonFields%22+repo%3Aprojectcontour%2Fcontour+path%3Aapis&type=code
+[3]: https://github.com/search?q=%22var+envoySimpleOperators%22+repo%3Aprojectcontour%2Fcontour+path%3Aapis&type=code
+[4]: https://github.com/search?q=%22var+envoyComplexOperators%22+repo%3Aprojectcontour%2Fcontour+path%3Aapis&type=code
+[5]: https://github.com/search?q=%22var+DefaultAccessLogJSONFields%22+repo%3Aprojectcontour%2Fcontour+path%3Aapis&type=code
+[6]: {{< param github_url >}}/tree/{{< param latest_version >}}/examples/contour/01-contour-config.yaml
+[7]: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage
+[8]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/formatter/req_without_query/v3/req_without_query.proto
+[9]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/formatter/metadata/v3/metadata.proto
\ No newline at end of file
diff --git a/site/content/docs/1.29/config/annotations.md b/site/content/docs/1.29/config/annotations.md
new file mode 100644
index 00000000000..4d805f8f0f1
--- /dev/null
+++ b/site/content/docs/1.29/config/annotations.md
@@ -0,0 +1,98 @@
+# Annotations Reference
+
+
+
+Annotations are used in Ingress Controllers to configure features that are not covered by the Kubernetes Ingress API.
+
+Some of the features that have been historically configured via annotations are supported as first-class features in Contour's [HTTPProxy API][15], which provides a more robust configuration interface over annotations.
+
+However, Contour still supports a number of annotations on the Ingress resources.
+
+## Standard Kubernetes Ingress annotations
+
+The following Kubernetes annotations are supported on `Ingress` objects:
+
+### Ingress Class
+
+The Ingress class annotation can be used to specify which Ingress controller should serve a particular Ingress object.
+This annotation may be specified as the standard `kubernetes.io/ingress.class` or a Contour-specific `projectcontour.io/ingress.class`.
+In both cases, they will behave as follows, by default:
+
+* If not set, then all Ingress controllers serve the Ingress.
+* If specified as `kubernetes.io/ingress.class: contour`, then Contour serves the Ingress.
+* If any other value, Contour ignores the Ingress definition.
+
+You can override the default class `contour` by providing the `--ingress-class-name` flag to Contour.
+This can be useful while you are migrating from another controller, or if you need multiple instances of Contour.
+If you do this, the behavior is as follows:
+* If the annotation is not set, Contour will ignore the Ingress.
+* If the annotation is set to any value other than the one passed to the `--ingress-class-name` flag, Contour will ignore the Ingress.
+* If the annotation matches the value that you passed to `--ingress-class-name` flag, Contour will serve the Ingress.
+
+This same logic applies for these annotations on HTTPProxy objects.
+
+_Note: Both `Ingress` and `HTTPProxy` now have an `IngressClassName` field in their spec. Going forward this is the preferred way to specify an ingress class, rather than using an annotation. If both the annotation and the spec field are specified on an object, the annotation takes preference for backwards compatibility._
+
+_Note: The `--ingress-class-name` value can be a comma-separated list of class names to match against. Contour will serve the Ingress or HTTPProxy if the annotation or IngressClassName matches any of the specified class name values._
+
+### Other annotations
+
+ - `ingress.kubernetes.io/force-ssl-redirect`: Requires TLS/SSL for the Ingress to Envoy by setting the [Envoy virtual host option require_tls][16].
+ - `kubernetes.io/ingress.allow-http`: Instructs Contour to not create an Envoy HTTP route for the virtual host. The Ingress exists only for HTTPS requests. Specify `"false"` for Envoy to mark the endpoint as HTTPS only. All other values are ignored.
+
+The `ingress.kubernetes.io/force-ssl-redirect` annotation takes precedence over `kubernetes.io/ingress.allow-http`. If they are set to `"true"` and `"false"` respectively, Contour *will* create an Envoy HTTP route for the Virtual host, and set the `require_tls` virtual host option.
+
+## Contour specific Ingress annotations
+
+ - `projectcontour.io/ingress.class`: The Ingress class that should interpret and serve the Ingress. See the [main Ingress class annotation section](#ingress-class) for more details.
+ - `projectcontour.io/num-retries`: [The maximum number of retries][1] Envoy should make before abandoning and returning an error to the client. Applies only if `projectcontour.io/retry-on` is specified. Set to -1 to disable retries.
+ - `projectcontour.io/per-try-timeout`: [The timeout per retry attempt][2], if there should be one. Applies only if `projectcontour.io/retry-on` is specified.
+ - `projectcontour.io/response-timeout`: [The Envoy HTTP route timeout][3], specified as a [golang duration][4]. By default, Envoy has a 15 second timeout for a backend service to respond. Set this to `infinity` to specify that Envoy should never timeout the connection to the backend. Note that the value `0s` / zero has special semantics for Envoy.
+ - `projectcontour.io/retry-on`: [The conditions for Envoy to retry a request][5]. See also [possible values and their meanings for `retry-on`][6].
+ - `projectcontour.io/tls-minimum-protocol-version`: [The minimum TLS protocol version][7] the TLS listener should support. Valid options are `1.3`, `1.2` (default).
+ - `projectcontour.io/tls-maximum-protocol-version`: [The maximum TLS protocol version][7] the TLS listener should support. Valid options are `1.2`, `1.3` (default).
 - `projectcontour.io/websocket-routes`: [The routes supporting websocket protocol][8], the annotation value contains a list of route paths separated by a comma that must match with the ones defined in the `Ingress` definition. Defaults to Envoy's default behavior, which is `use_websocket` set to `false`.
+ - `projectcontour.io/tls-cert-namespace`: The namespace where all TLS secrets of this Ingress are searched. This is necessary to use [TLS Certificate Delegation][18] with Ingress v1 because the slash notation (ex: different-ns/app-cert) used by HTTPProxy and Ingress v1beta1 is not accepted. See [this issue][19] for details.
+
+## Contour specific Service annotations
+
+A [Kubernetes Service][9] maps to an [Envoy Cluster][10]. Envoy clusters have many settings to control specific behaviors. These annotations allow access to some of those settings.
+
+- `projectcontour.io/max-connections`: [The maximum number of connections][11] that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
+- `projectcontour.io/max-pending-requests`: [The maximum number of pending requests][12] that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
+- `projectcontour.io/max-requests`: [The maximum parallel requests][13] a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
+- `projectcontour.io/max-retries`: [The maximum number of parallel retries][14] a single Envoy instance allows to the Kubernetes Service; defaults to 3. This is independent of the per-Kubernetes Ingress number of retries (`projectcontour.io/num-retries`) and retry-on (`projectcontour.io/retry-on`), which control whether retries are attempted and how many times a single request can retry.
+- `projectcontour.io/per-host-max-connections`: [The maximum number of connections][20] that a single Envoy instance allows to an individual Kubernetes Service endpoint; no default (unlimited).
+- `projectcontour.io/upstream-protocol.{protocol}` : The protocol used to proxy requests to the upstream service.
+ The annotation value contains a comma-separated list of port names and/or numbers that must match with the ones defined in the `Service` definition.
+ This value can also be specified in the `spec.routes.services[].protocol` field on the HTTPProxy object, where it takes precedence over the Service annotation.
+ Supported protocol names are: `h2`, `h2c`, and `tls`:
+ - The `tls` protocol allows for requests which terminate at Envoy to proxy via TLS to the upstream.
+ This protocol should be used for HTTP/1.1 services over TLS.
+ _Note that validating the upstream TLS certificate requires additionally setting the [validation][17] field._
+ - The `h2` protocol proxies requests to the upstream using HTTP/2 over TLS.
+ - The `h2c` protocol proxies requests to the upstream using cleartext HTTP/2.
+
+## Contour specific HTTPProxy annotations
+- `projectcontour.io/ingress.class`: The Ingress class that should interpret and serve the HTTPProxy. See the [main Ingress class annotation section](#ingress-class) for more details.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#config-http-filters-router-x-envoy-max-retries
+[2]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-retrypolicy-retry-on
+[3]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-routeaction-timeout
+[4]: https://golang.org/pkg/time/#ParseDuration
+[5]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-retrypolicy-retry-on
+[6]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#config-http-filters-router-x-envoy-retry-on
+[7]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto.html#extensions-transport-sockets-tls-v3-tlsparameters
+[8]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-routeaction-upgrade-configs
+[9]: https://kubernetes.io/docs/concepts/services-networking/service/
+[10]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/intro/terminology
+[11]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-thresholds-max-connections
+[12]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-thresholds-max-pending-requests
+[13]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-thresholds-max-requests
+[14]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-thresholds-max-retries
+[15]: fundamentals.md
+[16]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-virtualhost-require-tls
+[17]: api/#projectcontour.io/v1.UpstreamValidation
+[18]: ../config/tls-delegation/
+[19]: https://github.com/projectcontour/contour/issues/3544
+[20]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/circuit_breaker.proto#envoy-v3-api-field-config-cluster-v3-circuitbreakers-per-host-thresholds
\ No newline at end of file
diff --git a/site/content/docs/1.29/config/api-reference.html b/site/content/docs/1.29/config/api-reference.html
new file mode 100644
index 00000000000..b9ba8f7400d
--- /dev/null
+++ b/site/content/docs/1.29/config/api-reference.html
@@ -0,0 +1,9109 @@
+Packages:
+
+projectcontour.io/v1
+
+
Package v1 holds the specification for the projectcontour.io Custom Resource Definitions (CRDs).
+In building this CRD, we’ve inadvertently overloaded the word “Condition”, so we’ve tried to make
+this spec clear as to which types of condition are which.
+MatchConditions
are used by Routes
and Includes
to specify rules to match requests against for either
+routing or inclusion.
+DetailedConditions
are used in the Status
of these objects to hold information about the relevant
+state of the object and the world around it.
+SubConditions
are used underneath DetailedConditions
to give more detail to errors or warnings.
+
+Resource Types:
+
+HTTPProxy
+
+
+
HTTPProxy is an Ingress CRD specification.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+projectcontour.io/v1
+
+ |
+
+
+
+kind
+string
+ |
+HTTPProxy |
+
+
+
+metadata
+
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+
+HTTPProxySpec
+
+
+ |
+
+
+
+
+
+
+virtualhost
+
+
+
+VirtualHost
+
+
+ |
+
+(Optional)
+ Virtualhost appears at most once. If it is present, the object is considered
+to be a “root” HTTPProxy.
+ |
+
+
+
+routes
+
+
+
+[]Route
+
+
+ |
+
+(Optional)
+ Routes are the ingress routes. If TCPProxy is present, Routes is ignored.
+ |
+
+
+
+tcpproxy
+
+
+
+TCPProxy
+
+
+ |
+
+(Optional)
+ TCPProxy holds TCP proxy information.
+ |
+
+
+
+includes
+
+
+
+[]Include
+
+
+ |
+
+(Optional)
+ Includes allow for specific routing configuration to be included from another HTTPProxy,
+possibly in another namespace.
+ |
+
+
+
+ingressClassName
+
+
+string
+
+ |
+
+(Optional)
+ IngressClassName optionally specifies the ingress class to use for this
+HTTPProxy. This replaces the deprecated kubernetes.io/ingress.class
+annotation. For backwards compatibility, when that annotation is set, it
+is given precedence over this field.
+ |
+
+
+ |
+
+
+
+status
+
+
+
+HTTPProxyStatus
+
+
+ |
+
+(Optional)
+ Status is a container for computed information about the HTTPProxy.
+ |
+
+
+
+TLSCertificateDelegation
+
+
+
TLSCertificateDelegation is an TLS Certificate Delegation CRD specification.
+See design/tls-certificate-delegation.md for details.
+
+
+AuthorizationPolicy
+
+
+(Appears on:
+AuthorizationServer,
+Route)
+
+
+
AuthorizationPolicy modifies how client requests are authenticated.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+disabled
+
+
+bool
+
+ |
+
+(Optional)
+ When true, this field disables client request authentication
+for the scope of the policy.
+ |
+
+
+
+context
+
+
+map[string]string
+
+ |
+
+(Optional)
+ Context is a set of key/value pairs that are sent to the
+authentication server in the check request. If a context
+is provided at an enclosing scope, the entries are merged
+such that the inner scope overrides matching keys from the
+outer scope.
+ |
+
+
+
+AuthorizationServer
+
+
+(Appears on:
+VirtualHost,
+ContourConfigurationSpec)
+
+
+
AuthorizationServer configures an external server to authenticate
+client requests. The external server must implement the v3 Envoy
+external authorization GRPC protocol (https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto).
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+extensionRef
+
+
+
+ExtensionServiceReference
+
+
+ |
+
+(Optional)
+ ExtensionServiceRef specifies the extension resource that will authorize client requests.
+ |
+
+
+
+authPolicy
+
+
+
+AuthorizationPolicy
+
+
+ |
+
+(Optional)
+ AuthPolicy sets a default authorization policy for client requests.
+This policy will be used unless overridden by individual routes.
+ |
+
+
+
+responseTimeout
+
+
+string
+
+ |
+
+(Optional)
+ ResponseTimeout configures maximum time to wait for a check response from the authorization server.
+Timeout durations are expressed in the Go Duration format.
+Valid time units are “ns”, “us” (or “µs”), “ms”, “s”, “m”, “h”.
+The string “infinity” is also a valid input and specifies no timeout.
+ |
+
+
+
+failOpen
+
+
+bool
+
+ |
+
+(Optional)
+ If FailOpen is true, the client request is forwarded to the upstream service
+even if the authorization server fails to respond. This field should not be
+set in most cases. It is intended for use only while migrating applications
+from internal authorization to Contour external authorization.
+ |
+
+
+
+withRequestBody
+
+
+
+AuthorizationServerBufferSettings
+
+
+ |
+
+(Optional)
+ WithRequestBody specifies configuration for sending the client request’s body to authorization server.
+ |
+
+
+
+AuthorizationServerBufferSettings
+
+
+(Appears on:
+AuthorizationServer)
+
+
+
AuthorizationServerBufferSettings enables ExtAuthz filter to buffer client request data and send it as part of authorization request
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+maxRequestBytes
+
+
+uint32
+
+ |
+
+(Optional)
+ MaxRequestBytes sets the maximum size of message body ExtAuthz filter will hold in-memory.
+ |
+
+
+
+allowPartialMessage
+
+
+bool
+
+ |
+
+(Optional)
+ If AllowPartialMessage is true, then Envoy will buffer the body until MaxRequestBytes are reached.
+ |
+
+
+
+packAsBytes
+
+
+bool
+
+ |
+
+(Optional)
+ If PackAsBytes is true, the body sent to Authorization Server is in raw bytes.
+ |
+
+
+
+
+
+(Appears on:
+CORSPolicy)
+
+
+
CORSHeaderValue specifies the value of the string headers returned by a cross-domain request.
+
+CORSPolicy
+
+
+(Appears on:
+VirtualHost)
+
+
+
CORSPolicy allows setting the CORS policy
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+allowCredentials
+
+
+bool
+
+ |
+
+(Optional)
+ Specifies whether the resource allows credentials.
+ |
+
+
+
+allowOrigin
+
+
+[]string
+
+ |
+
+ AllowOrigin specifies the origins that will be allowed to do CORS requests.
+Allowed values include “*” which signifies any origin is allowed, an exact
+origin of the form “scheme://host[:port]” (where port is optional), or a valid
+regex pattern.
+Note that regex patterns are validated and a simple “glob” pattern (e.g. *.foo.com)
+will be rejected or produce unexpected matches when applied as a regex.
+ |
+
+
+
+allowMethods
+
+
+
+[]CORSHeaderValue
+
+
+ |
+
+ AllowMethods specifies the content for the access-control-allow-methods header.
+ |
+
+
+
+allowHeaders
+
+
+
+[]CORSHeaderValue
+
+
+ |
+
+(Optional)
+ AllowHeaders specifies the content for the access-control-allow-headers header.
+ |
+
+
+
+exposeHeaders
+
+
+
+[]CORSHeaderValue
+
+
+ |
+
+(Optional)
+ ExposeHeaders Specifies the content for the access-control-expose-headers header.
+ |
+
+
+
+maxAge
+
+
+string
+
+ |
+
+(Optional)
+ MaxAge indicates for how long the results of a preflight request can be cached.
+MaxAge durations are expressed in the Go Duration format.
+Valid time units are “ns”, “us” (or “µs”), “ms”, “s”, “m”, “h”.
+Only positive values are allowed while 0 disables the cache requiring a preflight OPTIONS
+check for all cross-origin requests.
+ |
+
+
+
+allowPrivateNetwork
+
+
+bool
+
+ |
+
+ AllowPrivateNetwork specifies whether to allow private network requests.
+See https://developer.chrome.com/blog/private-network-access-preflight.
+ |
+
+
+
+CertificateDelegation
+
+
+(Appears on:
+TLSCertificateDelegationSpec)
+
+
+
CertificateDelegation maps the authority to reference a secret
+in the current namespace to a set of namespaces.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+secretName
+
+
+string
+
+ |
+
+ required, the name of a secret in the current namespace.
+ |
+
+
+
+targetNamespaces
+
+
+[]string
+
+ |
+
+ required, the namespaces the authority to reference the
+secret will be delegated to.
+If TargetNamespaces is nil or empty, the CertificateDelegation
+is ignored. If the TargetNamespace list contains the character “*”,
+the secret will be delegated to all namespaces.
+ |
+
+
+
+ClientCertificateDetails
+
+
+(Appears on:
+DownstreamValidation)
+
+
+
ClientCertificateDetails defines which parts of the client certificate will be forwarded.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+subject
+
+
+bool
+
+ |
+
+(Optional)
+ Subject of the client cert.
+ |
+
+
+
+cert
+
+
+bool
+
+ |
+
+(Optional)
+ Client cert in URL encoded PEM format.
+ |
+
+
+
+chain
+
+
+bool
+
+ |
+
+(Optional)
+ Client cert chain (including the leaf cert) in URL encoded PEM format.
+ |
+
+
+
+dns
+
+
+bool
+
+ |
+
+(Optional)
+ DNS type Subject Alternative Names of the client cert.
+ |
+
+
+
+uri
+
+
+bool
+
+ |
+
+(Optional)
+ URI type Subject Alternative Name of the client cert.
+ |
+
+
+
+CookieDomainRewrite
+
+
+(Appears on:
+CookieRewritePolicy)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+value
+
+
+string
+
+ |
+
+ Value is the value to rewrite the Domain attribute to.
+For now this is required.
+ |
+
+
+
+CookiePathRewrite
+
+
+(Appears on:
+CookieRewritePolicy)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+value
+
+
+string
+
+ |
+
+ Value is the value to rewrite the Path attribute to.
+For now this is required.
+ |
+
+
+
+CookieRewritePolicy
+
+
+(Appears on:
+Route,
+Service)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name is the name of the cookie for which attributes will be rewritten.
+ |
+
+
+
+pathRewrite
+
+
+
+CookiePathRewrite
+
+
+ |
+
+(Optional)
+ PathRewrite enables rewriting the Set-Cookie Path element.
+If not set, Path will not be rewritten.
+ |
+
+
+
+domainRewrite
+
+
+
+CookieDomainRewrite
+
+
+ |
+
+(Optional)
+ DomainRewrite enables rewriting the Set-Cookie Domain element.
+If not set, Domain will not be rewritten.
+ |
+
+
+
+secure
+
+
+bool
+
+ |
+
+(Optional)
+ Secure enables rewriting the Set-Cookie Secure element.
+If not set, Secure attribute will not be rewritten.
+ |
+
+
+
+sameSite
+
+
+string
+
+ |
+
+(Optional)
+ SameSite enables rewriting the Set-Cookie SameSite element.
+If not set, SameSite attribute will not be rewritten.
+ |
+
+
+
+DetailedCondition
+
+
+(Appears on:
+HTTPProxyStatus,
+TLSCertificateDelegationStatus,
+ContourConfigurationStatus,
+ExtensionServiceStatus)
+
+
+
DetailedCondition is an extension of the normal Kubernetes conditions, with two extra
+fields to hold sub-conditions, which provide more detailed reasons for the state (True or False)
+of the condition.
+errors
holds information about sub-conditions which are fatal to that condition and render its state False.
+warnings
holds information about sub-conditions which are not fatal to that condition and do not force the state to be False.
+Remember that Conditions have a type, a status, and a reason.
+The type is the type of the condition, the most important one in this CRD set is Valid
.
+Valid
is a positive-polarity condition: when it is status: true
there are no problems.
+In more detail, status: true
+ means that the object has been ingested into Contour with no errors.
+warnings
may still be present, and will be indicated in the Reason field. There must be zero entries in the errors
+slice in this case.
+Valid
, status: false
means that the object has had one or more fatal errors during processing into Contour.
+The details of the errors will be present under the errors
field. There must be at least one error in the errors
+slice if status
is false
.
+For DetailedConditions of types other than Valid
, the Condition must be in the negative polarity.
+When they have status
true
, there is an error. There must be at least one entry in the errors
Subcondition slice.
+When they have status
false
, there are no serious errors, and there must be zero entries in the errors
slice.
+In either case, there may be entries in the warnings
slice.
+Regardless of the polarity, the reason
and message
fields must be updated with either the detail of the reason
+(if there is one and only one entry in total across both the errors
and warnings
slices), or
+MultipleReasons
if there is more than one entry.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+Condition
+
+
+
+Kubernetes meta/v1.Condition
+
+
+ |
+
+
+(Members of Condition are embedded into this type.)
+
+ |
+
+
+
+errors
+
+
+
+[]SubCondition
+
+
+ |
+
+(Optional)
+ Errors contains a slice of relevant error subconditions for this object.
+Subconditions are expected to appear when relevant (when there is a error), and disappear when not relevant.
+An empty slice here indicates no errors.
+ |
+
+
+
+warnings
+
+
+
+[]SubCondition
+
+
+ |
+
+(Optional)
+ Warnings contains a slice of relevant warning subconditions for this object.
+Subconditions are expected to appear when relevant (when there is a warning), and disappear when not relevant.
+An empty slice here indicates no warnings.
+ |
+
+
+
+DownstreamValidation
+
+
+(Appears on:
+TLS)
+
+
+
DownstreamValidation defines how to verify the client certificate.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+caSecret
+
+
+string
+
+ |
+
+(Optional)
+ Name of a Kubernetes secret that contains a CA certificate bundle.
+The secret must contain key named ca.crt.
+The client certificate must validate against the certificates in the bundle.
+If specified and SkipClientCertValidation is true, client certificates will
+be required on requests.
+The name can be optionally prefixed with namespace “namespace/name”.
+When cross-namespace reference is used, TLSCertificateDelegation resource must exist in the namespace to grant access to the secret.
+ |
+
+
+
+skipClientCertValidation
+
+
+bool
+
+ |
+
+(Optional)
+ SkipClientCertValidation disables downstream client certificate
+validation. Defaults to false. This field is intended to be used in
+conjunction with external authorization in order to enable the external
+authorization server to validate client certificates. When this field
+is set to true, client certificates are requested but not verified by
+Envoy. If CACertificate is specified, client certificates are required on
+requests, but not verified. If external authorization is in use, they are
+presented to the external authorization server.
+ |
+
+
+
+forwardClientCertificate
+
+
+
+ClientCertificateDetails
+
+
+ |
+
+(Optional)
+ ForwardClientCertificate adds the selected data from the passed client TLS certificate
+to the x-forwarded-client-cert header.
+ |
+
+
+
+crlSecret
+
+
+string
+
+ |
+
+(Optional)
+ Name of a Kubernetes opaque secret that contains a concatenated list of PEM encoded CRLs.
+The secret must contain key named crl.pem.
+This field will be used to verify that a client certificate has not been revoked.
+CRLs must be available from all CAs, unless crlOnlyVerifyLeafCert is true.
+Large CRL lists are not supported since individual secrets are limited to 1MiB in size.
+The name can be optionally prefixed with namespace “namespace/name”.
+When cross-namespace reference is used, TLSCertificateDelegation resource must exist in the namespace to grant access to the secret.
+ |
+
+
+
+crlOnlyVerifyLeafCert
+
+
+bool
+
+ |
+
+(Optional)
+ If this option is set to true, only the certificate at the end of the
+certificate chain will be subject to validation by CRL.
+ |
+
+
+
+optionalClientCertificate
+
+
+bool
+
+ |
+
+(Optional)
+ OptionalClientCertificate when set to true will request a client certificate
+but allow the connection to continue if the client does not provide one.
+If a client certificate is sent, it will be verified according to the
+other properties, which includes disabling validation if
+SkipClientCertValidation is set. Defaults to false.
+ |
+
+
+
+ExtensionServiceReference
+
+
+(Appears on:
+AuthorizationServer)
+
+
+
ExtensionServiceReference names an ExtensionService resource.
+
+
+Feature
+(string
alias)
+
+(Appears on:
+ContourSettings)
+
+
+
+GenericKeyDescriptor
+
+
+(Appears on:
+RateLimitDescriptorEntry)
+
+
+
GenericKeyDescriptor defines a descriptor entry with a static key and
+value.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+key
+
+
+string
+
+ |
+
+(Optional)
+ Key defines the key of the descriptor entry. If not set, the
+key is set to “generic_key”.
+ |
+
+
+
+value
+
+
+string
+
+ |
+
+ Value defines the value of the descriptor entry.
+ |
+
+
+
+GlobalRateLimitPolicy
+
+
+(Appears on:
+RateLimitPolicy,
+RateLimitServiceConfig)
+
+
+
GlobalRateLimitPolicy defines global rate limiting parameters.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+disabled
+
+
+bool
+
+ |
+
+(Optional)
+ Disabled configures the HTTPProxy to not use
+the default global rate limit policy defined by the Contour configuration.
+ |
+
+
+
+descriptors
+
+
+
+[]RateLimitDescriptor
+
+
+ |
+
+(Optional)
+ Descriptors defines the list of descriptors that will
+be generated and sent to the rate limit service. Each
+descriptor contains 1+ key-value pair entries.
+ |
+
+
+
+HTTPDirectResponsePolicy
+
+
+(Appears on:
+Route)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+statusCode
+
+
+int
+
+ |
+
+ StatusCode is the HTTP response status to be returned.
+ |
+
+
+
+body
+
+
+string
+
+ |
+
+(Optional)
+ Body is the content of the response body.
+If this setting is omitted, no body is included in the generated response.
+Note: Body is not recommended to set too long
+otherwise it can have significant resource usage impacts.
+ |
+
+
+
+HTTPHealthCheckPolicy
+
+
+(Appears on:
+Route)
+
+
+
HTTPHealthCheckPolicy defines health checks on the upstream service.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+path
+
+
+string
+
+ |
+
+ HTTP endpoint used to perform health checks on upstream service
+ |
+
+
+
+host
+
+
+string
+
+ |
+
+ The value of the host header in the HTTP health check request.
+If left empty (default value), the name “contour-envoy-healthcheck”
+will be used.
+ |
+
+
+
+intervalSeconds
+
+
+int64
+
+ |
+
+(Optional)
+ The interval (seconds) between health checks
+ |
+
+
+
+timeoutSeconds
+
+
+int64
+
+ |
+
+(Optional)
+ The time to wait (seconds) for a health check response
+ |
+
+
+
+unhealthyThresholdCount
+
+
+int64
+
+ |
+
+(Optional)
+ The number of unhealthy health checks required before a host is marked unhealthy
+ |
+
+
+
+healthyThresholdCount
+
+
+int64
+
+ |
+
+(Optional)
+ The number of healthy health checks required before a host is marked healthy
+ |
+
+
+
+expectedStatuses
+
+
+
+[]HTTPStatusRange
+
+
+ |
+
+(Optional)
+ The ranges of HTTP response statuses considered healthy. Follow half-open
+semantics, i.e. for each range the start is inclusive and the end is exclusive.
+Must be within the range [100,600). If not specified, only a 200 response status
+is considered healthy.
+ |
+
+
+
+HTTPInternalRedirectPolicy
+
+
+(Appears on:
+Route)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+maxInternalRedirects
+
+
+uint32
+
+ |
+
+(Optional)
+ MaxInternalRedirects An internal redirect is not handled, unless the number of previous internal
+redirects that a downstream request has encountered is lower than this value.
+ |
+
+
+
+redirectResponseCodes
+
+
+
+[]RedirectResponseCode
+
+
+ |
+
+(Optional)
+ RedirectResponseCodes If unspecified, only 302 will be treated as internal redirect.
+Only 301, 302, 303, 307 and 308 are valid values.
+ |
+
+
+
+allowCrossSchemeRedirect
+
+
+string
+
+ |
+
+(Optional)
+ AllowCrossSchemeRedirect Allow internal redirect to follow a target URI with a different scheme
+than the value of x-forwarded-proto.
+SafeOnly allows same scheme redirect and safe cross scheme redirect, which means if the downstream
+scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the downstream scheme
+is HTTP, only HTTP redirect targets are allowed.
+ |
+
+
+
+denyRepeatedRouteRedirect
+
+
+bool
+
+ |
+
+(Optional)
+ If DenyRepeatedRouteRedirect is true, rejects redirect targets that are pointing to a route that has
+been followed by a previous redirect from the current route.
+ |
+
+
+
+HTTPProxySpec
+
+
+(Appears on:
+HTTPProxy)
+
+
+
HTTPProxySpec defines the spec of the CRD.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+virtualhost
+
+
+
+VirtualHost
+
+
+ |
+
+(Optional)
+ Virtualhost appears at most once. If it is present, the object is considered
+to be a “root” HTTPProxy.
+ |
+
+
+
+routes
+
+
+
+[]Route
+
+
+ |
+
+(Optional)
+ Routes are the ingress routes. If TCPProxy is present, Routes is ignored.
+ |
+
+
+
+tcpproxy
+
+
+
+TCPProxy
+
+
+ |
+
+(Optional)
+ TCPProxy holds TCP proxy information.
+ |
+
+
+
+includes
+
+
+
+[]Include
+
+
+ |
+
+(Optional)
+ Includes allow for specific routing configuration to be included from another HTTPProxy,
+possibly in another namespace.
+ |
+
+
+
+ingressClassName
+
+
+string
+
+ |
+
+(Optional)
+ IngressClassName optionally specifies the ingress class to use for this
+HTTPProxy. This replaces the deprecated kubernetes.io/ingress.class
+annotation. For backwards compatibility, when that annotation is set, it
+is given precedence over this field.
+ |
+
+
+
+HTTPProxyStatus
+
+
+(Appears on:
+HTTPProxy)
+
+
+
HTTPProxyStatus reports the current state of the HTTPProxy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+currentStatus
+
+
+string
+
+ |
+
+(Optional)
+ |
+
+
+
+description
+
+
+string
+
+ |
+
+(Optional)
+ |
+
+
+
+loadBalancer
+
+
+
+Kubernetes core/v1.LoadBalancerStatus
+
+
+ |
+
+(Optional)
+ LoadBalancer contains the current status of the load balancer.
+ |
+
+
+
+conditions
+
+
+
+[]DetailedCondition
+
+
+ |
+
+(Optional)
+ Conditions contains information about the current status of the HTTPProxy,
+in an upstream-friendly container.
+Contour will update a single condition, Valid , that is in normal-true polarity.
+That is, when currentStatus is valid , the Valid condition will be status: true ,
+and vice versa.
+Contour will leave untouched any other Conditions set in this block,
+in case some other controller wants to add a Condition.
+If you are another controller owner and wish to add a condition, you should
+namespace your condition with a label, like controller.domain.com/ConditionName .
+ |
+
+
+
+HTTPRequestRedirectPolicy
+
+
+(Appears on:
+Route)
+
+
+
HTTPRequestRedirectPolicy defines configuration for redirecting a request.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+scheme
+
+
+string
+
+ |
+
+(Optional)
+ Scheme is the scheme to be used in the value of the Location
+header in the response.
+When empty, the scheme of the request is used.
+ |
+
+
+
+hostname
+
+
+string
+
+ |
+
+(Optional)
+ Hostname is the precise hostname to be used in the value of the Location
+header in the response.
+When empty, the hostname of the request is used.
+No wildcards are allowed.
+ |
+
+
+
+port
+
+
+int32
+
+ |
+
+(Optional)
+ Port is the port to be used in the value of the Location
+header in the response.
+When empty, port (if specified) of the request is used.
+ |
+
+
+
+statusCode
+
+
+int
+
+ |
+
+(Optional)
+ StatusCode is the HTTP status code to be used in response.
+ |
+
+
+
+path
+
+
+string
+
+ |
+
+(Optional)
+ Path allows for redirection to a different path from the
+original on the request. The path must start with a
+leading slash.
+Note: Only one of Path or Prefix can be defined.
+ |
+
+
+
+prefix
+
+
+string
+
+ |
+
+(Optional)
+ Prefix defines the value to swap the matched prefix or path with.
+The prefix must start with a leading slash.
+Note: Only one of Path or Prefix can be defined.
+ |
+
+
+
+HTTPStatusRange
+
+
+(Appears on:
+HTTPHealthCheckPolicy)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+start
+
+
+int64
+
+ |
+
+ The start (inclusive) of a range of HTTP status codes.
+ |
+
+
+
+end
+
+
+int64
+
+ |
+
+ The end (exclusive) of a range of HTTP status codes.
+ |
+
+
+
+
+
+(Appears on:
+RequestHashPolicy)
+
+
+
HeaderHashOptions contains options to configure a HTTP request header hash
+policy, used in request attribute hash based load balancing.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+headerName
+
+
+string
+
+ |
+
+ HeaderName is the name of the HTTP request header that will be used to
+calculate the hash key. If the header specified is not present on a
+request, no hash will be produced.
+ |
+
+
+
+
+
+(Appears on:
+MatchCondition,
+RequestHeaderValueMatchDescriptor)
+
+
+
HeaderMatchCondition specifies how to conditionally match against HTTP
+headers. The Name field is required, only one of Present, NotPresent,
+Contains, NotContains, Exact, NotExact and Regex can be set.
+For negative matching rules only (e.g. NotContains or NotExact) you can set
+TreatMissingAsEmpty.
+IgnoreCase has no effect for Regex.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name is the name of the header to match against. Name is required.
+Header names are case insensitive.
+ |
+
+
+
+present
+
+
+bool
+
+ |
+
+(Optional)
+ Present specifies that condition is true when the named header
+is present, regardless of its value. Note that setting Present
+to false does not make the condition true if the named header
+is absent.
+ |
+
+
+
+notpresent
+
+
+bool
+
+ |
+
+(Optional)
+ NotPresent specifies that condition is true when the named header
+is not present. Note that setting NotPresent to false does not
+make the condition true if the named header is present.
+ |
+
+
+
+contains
+
+
+string
+
+ |
+
+(Optional)
+ Contains specifies a substring that must be present in
+the header value.
+ |
+
+
+
+notcontains
+
+
+string
+
+ |
+
+(Optional)
+ NotContains specifies a substring that must not be present
+in the header value.
+ |
+
+
+
+ignoreCase
+
+
+bool
+
+ |
+
+(Optional)
+ IgnoreCase specifies that string matching should be case insensitive.
+Note that this has no effect on the Regex parameter.
+ |
+
+
+
+exact
+
+
+string
+
+ |
+
+(Optional)
+ Exact specifies a string that the header value must be equal to.
+ |
+
+
+
+notexact
+
+
+string
+
+ |
+
+(Optional)
+ NotExact specifies a string that the header value must not be
+equal to. The condition is true if the header has any other value.
+ |
+
+
+
+regex
+
+
+string
+
+ |
+
+(Optional)
+ Regex specifies a regular expression pattern that must match the header
+value.
+ |
+
+
+
+treatMissingAsEmpty
+
+
+bool
+
+ |
+
+(Optional)
+ TreatMissingAsEmpty specifies if the header match rule specified header
+does not exist, this header value will be treated as empty. Defaults to false.
+Unlike the underlying Envoy implementation this is only supported for
+negative matches (e.g. NotContains, NotExact).
+ |
+
+
+
+
+
+(Appears on:
+HeadersPolicy,
+LocalRateLimitPolicy)
+
+
+
HeaderValue represents a header name/value pair
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name represents a key of a header
+ |
+
+
+
+value
+
+
+string
+
+ |
+
+ Value represents the value of a header specified by a key
+ |
+
+
+
+
+
+(Appears on:
+Route,
+Service)
+
+
+
HeadersPolicy defines how headers are managed during forwarding.
+The Host
header is treated specially and if set in a HTTP request
+will be used as the SNI server name when forwarding over TLS. It is an
+error to attempt to set the Host
header in a HTTP response.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+set
+
+
+
+[]HeaderValue
+
+
+ |
+
+(Optional)
+ Set specifies a list of HTTP header values that will be set in the HTTP header.
+If the header does not exist it will be added, otherwise it will be overwritten with the new value.
+ |
+
+
+
+remove
+
+
+[]string
+
+ |
+
+(Optional)
+ Remove specifies a list of HTTP header names to remove.
+ |
+
+
+
+IPFilterPolicy
+
+
+(Appears on:
+Route,
+VirtualHost)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+source
+
+
+
+IPFilterSource
+
+
+ |
+
+ Source indicates how to determine the ip address to filter on, and can be
+one of two values:
+- Remote filters on the ip address of the client, accounting for PROXY and
+X-Forwarded-For as needed.
+- Peer filters on the ip of the network request, ignoring PROXY and
+X-Forwarded-For.
+ |
+
+
+
+cidr
+
+
+string
+
+ |
+
+ CIDR is a CIDR block of ipv4 or ipv6 addresses to filter on. This can also be
+a bare IP address (without a mask) to filter on exactly one address.
+ |
+
+
+
+IPFilterSource
+(string
alias)
+
+(Appears on:
+IPFilterPolicy)
+
+
+
IPFilterSource indicates which IP should be considered for filtering
+
+
+
+
+Value |
+Description |
+
+
+"Peer" |
+ |
+
"Remote" |
+ |
+
+
+Include
+
+
+(Appears on:
+HTTPProxySpec)
+
+
+
Include describes a set of policies that can be applied to an HTTPProxy in a namespace.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name of the HTTPProxy
+ |
+
+
+
+namespace
+
+
+string
+
+ |
+
+(Optional)
+ Namespace of the HTTPProxy to include. Defaults to the current namespace if not supplied.
+ |
+
+
+
+conditions
+
+
+
+[]MatchCondition
+
+
+ |
+
+(Optional)
+ Conditions are a set of rules that are applied to included HTTPProxies.
+In effect, they are added onto the Conditions of included HTTPProxy Route
+structs.
+When applied, they are merged using AND, with one exception:
+There can be only one Prefix MatchCondition per Conditions slice.
+More than one Prefix, or contradictory Conditions, will make the
+include invalid. Exact and Regex match conditions are not allowed
+on includes.
+ |
+
+
+
+JWTProvider
+
+
+(Appears on:
+VirtualHost)
+
+
+
JWTProvider defines how to verify JWTs on requests.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Unique name for the provider.
+ |
+
+
+
+default
+
+
+bool
+
+ |
+
+(Optional)
+ Whether the provider should apply to all
+routes in the HTTPProxy/its includes by
+default. At most one provider can be marked
+as the default. If no provider is marked
+as the default, individual routes must explicitly
+identify the provider they require.
+ |
+
+
+
+issuer
+
+
+string
+
+ |
+
+(Optional)
+ Issuer that JWTs are required to have in the “iss” field.
+If not provided, JWT issuers are not checked.
+ |
+
+
+
+audiences
+
+
+[]string
+
+ |
+
+(Optional)
+ Audiences that JWTs are allowed to have in the “aud” field.
+If not provided, JWT audiences are not checked.
+ |
+
+
+
+remoteJWKS
+
+
+
+RemoteJWKS
+
+
+ |
+
+ Remote JWKS to use for verifying JWT signatures.
+ |
+
+
+
+forwardJWT
+
+
+bool
+
+ |
+
+(Optional)
+ Whether the JWT should be forwarded to the backend
+service after successful verification. By default,
+the JWT is not forwarded.
+ |
+
+
+
+JWTVerificationPolicy
+
+
+(Appears on:
+Route)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+require
+
+
+string
+
+ |
+
+(Optional)
+ Require names a specific JWT provider (defined in the virtual host)
+to require for the route. If specified, this field overrides the
+default provider if one exists. If this field is not specified,
+the default provider will be required if one exists. At most one of
+this field or the “disabled” field can be specified.
+ |
+
+
+
+disabled
+
+
+bool
+
+ |
+
+(Optional)
+ Disabled defines whether to disable all JWT verification for this
+route. This can be used to opt specific routes out of the default
+JWT provider for the HTTPProxy. At most one of this field or the
+“require” field can be specified.
+ |
+
+
+
+LoadBalancerPolicy
+
+
+(Appears on:
+Route,
+TCPProxy,
+ExtensionServiceSpec)
+
+
+
LoadBalancerPolicy defines the load balancing policy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+strategy
+
+
+string
+
+ |
+
+ Strategy specifies the policy used to balance requests
+across the pool of backend pods. Valid policy names are
+Random , RoundRobin , WeightedLeastRequest , Cookie ,
+and RequestHash . If an unknown strategy name is specified
+or no policy is supplied, the default RoundRobin policy
+is used.
+ |
+
+
+
+requestHashPolicies
+
+
+
+[]RequestHashPolicy
+
+
+ |
+
+ RequestHashPolicies contains a list of hash policies to apply when the
+RequestHash load balancing strategy is chosen. If an element of the
+supplied list of hash policies is invalid, it will be ignored. If the
+list of hash policies is empty after validation, the load balancing
+strategy will fall back to the default RoundRobin .
+ |
+
+
+
+LocalRateLimitPolicy
+
+
+(Appears on:
+RateLimitPolicy)
+
+
+
LocalRateLimitPolicy defines local rate limiting parameters.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+requests
+
+
+uint32
+
+ |
+
+ Requests defines how many requests per unit of time should
+be allowed before rate limiting occurs.
+ |
+
+
+
+unit
+
+
+string
+
+ |
+
+ Unit defines the period of time within which requests
+over the limit will be rate limited. Valid values are
+“second”, “minute” and “hour”.
+ |
+
+
+
+burst
+
+
+uint32
+
+ |
+
+(Optional)
+ Burst defines the number of requests above the requests per
+unit that should be allowed within a short period of time.
+ |
+
+
+
+responseStatusCode
+
+
+uint32
+
+ |
+
+(Optional)
+ ResponseStatusCode is the HTTP status code to use for responses
+to rate-limited requests. Codes must be in the 400-599 range
+(inclusive). If not specified, the Envoy default of 429 (Too
+Many Requests) is used.
+ |
+
+
+
+responseHeadersToAdd
+
+
+
+[]HeaderValue
+
+
+ |
+
+(Optional)
+ ResponseHeadersToAdd is an optional list of response headers to
+set when a request is rate-limited.
+ |
+
+
+
+MatchCondition
+
+
+(Appears on:
+Include,
+Route)
+
+
+
MatchCondition are a general holder for matching rules for HTTPProxies.
+One of Prefix, Exact, Regex, Header or QueryParameter must be provided.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+prefix
+
+
+string
+
+ |
+
+(Optional)
+ Prefix defines a prefix match for a request.
+ |
+
+
+
+exact
+
+
+string
+
+ |
+
+(Optional)
+ Exact defines an exact match for a request.
+This field is not allowed in include match conditions.
+ |
+
+
+
+regex
+
+
+string
+
+ |
+
+(Optional)
+ Regex defines a regex match for a request.
+This field is not allowed in include match conditions.
+ |
+
+
+
+header
+
+
+
+HeaderMatchCondition
+
+
+ |
+
+(Optional)
+ Header specifies the header condition to match.
+ |
+
+
+
+queryParameter
+
+
+
+QueryParameterMatchCondition
+
+
+ |
+
+(Optional)
+ QueryParameter specifies the query parameter condition to match.
+ |
+
+
+
+Namespace
+(string
alias)
+
+(Appears on:
+ContourSettings)
+
+
+
Namespace refers to a Kubernetes namespace. It must be a RFC 1123 label.
+This validation is based off of the corresponding Kubernetes validation:
+https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/util/validation/validation.go#L187
+This is used for Namespace name validation here:
+https://github.com/kubernetes/apimachinery/blob/02cfb53916346d085a6c6c7c66f882e3c6b0eca6/pkg/api/validation/generic.go#L63
+Valid values include:
+
+Invalid values include:
+
+- “example.com” - “.” is an invalid character
+
+
+PathRewritePolicy
+
+
+(Appears on:
+Route)
+
+
+
PathRewritePolicy specifies how a request URL path should be
+rewritten. This rewriting takes place after a request is routed
+and has no subsequent effects on the proxy’s routing decision.
+No HTTP headers or body content is rewritten.
+Exactly one field in this struct may be specified.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replacePrefix
+
+
+
+[]ReplacePrefix
+
+
+ |
+
+(Optional)
+ ReplacePrefix describes how the path prefix should be replaced.
+ |
+
+
+
+QueryParameterHashOptions
+
+
+(Appears on:
+RequestHashPolicy)
+
+
+
QueryParameterHashOptions contains options to configure a query parameter based hash
+policy, used in request attribute hash based load balancing.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+parameterName
+
+
+string
+
+ |
+
+ ParameterName is the name of the HTTP request query parameter that will be used to
+calculate the hash key. If the query parameter specified is not present on a
+request, no hash will be produced.
+ |
+
+
+
+QueryParameterMatchCondition
+
+
+(Appears on:
+MatchCondition)
+
+
+
QueryParameterMatchCondition specifies how to conditionally match against HTTP
+query parameters. The Name field is required, only one of Exact, Prefix,
+Suffix, Regex, Contains and Present can be set. IgnoreCase has no effect
+for Regex.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name is the name of the query parameter to match against. Name is required.
+Query parameter names are case insensitive.
+ |
+
+
+
+exact
+
+
+string
+
+ |
+
+(Optional)
+ Exact specifies a string that the query parameter value must be equal to.
+ |
+
+
+
+prefix
+
+
+string
+
+ |
+
+(Optional)
+ Prefix defines a prefix match for the query parameter value.
+ |
+
+
+
+suffix
+
+
+string
+
+ |
+
+(Optional)
+ Suffix defines a suffix match for a query parameter value.
+ |
+
+
+
+regex
+
+
+string
+
+ |
+
+(Optional)
+ Regex specifies a regular expression pattern that must match the query
+parameter value.
+ |
+
+
+
+contains
+
+
+string
+
+ |
+
+(Optional)
+ Contains specifies a substring that must be present in
+the query parameter value.
+ |
+
+
+
+ignoreCase
+
+
+bool
+
+ |
+
+(Optional)
+ IgnoreCase specifies that string matching should be case insensitive.
+Note that this has no effect on the Regex parameter.
+ |
+
+
+
+present
+
+
+bool
+
+ |
+
+(Optional)
+ Present specifies that condition is true when the named query parameter
+is present, regardless of its value. Note that setting Present
+to false does not make the condition true if the named query parameter
+is absent.
+ |
+
+
+
+RateLimitDescriptor
+
+
+(Appears on:
+GlobalRateLimitPolicy)
+
+
+
RateLimitDescriptor defines a list of key-value pair generators.
+
+
+RateLimitDescriptorEntry
+
+
+(Appears on:
+RateLimitDescriptor)
+
+
+
RateLimitDescriptorEntry is a key-value pair generator. Exactly
+one field on this struct must be non-nil.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+genericKey
+
+
+
+GenericKeyDescriptor
+
+
+ |
+
+(Optional)
+ GenericKey defines a descriptor entry with a static key and value.
+ |
+
+
+
+requestHeader
+
+
+
+RequestHeaderDescriptor
+
+
+ |
+
+(Optional)
+ RequestHeader defines a descriptor entry that’s populated only if
+a given header is present on the request. The descriptor key is static,
+and the descriptor value is equal to the value of the header.
+ |
+
+
+
+requestHeaderValueMatch
+
+
+
+RequestHeaderValueMatchDescriptor
+
+
+ |
+
+(Optional)
+ RequestHeaderValueMatch defines a descriptor entry that’s populated
+if the request’s headers match a set of 1+ match criteria. The
+descriptor key is “header_match”, and the descriptor value is static.
+ |
+
+
+
+remoteAddress
+
+
+
+RemoteAddressDescriptor
+
+
+ |
+
+(Optional)
+ RemoteAddress defines a descriptor entry with a key of “remote_address”
+and a value equal to the client’s IP address (from x-forwarded-for).
+ |
+
+
+
+RateLimitPolicy
+
+
+(Appears on:
+Route,
+VirtualHost)
+
+
+
RateLimitPolicy defines rate limiting parameters.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+local
+
+
+
+LocalRateLimitPolicy
+
+
+ |
+
+(Optional)
+ Local defines local rate limiting parameters, i.e. parameters
+for rate limiting that occurs within each Envoy pod as requests
+are handled.
+ |
+
+
+
+global
+
+
+
+GlobalRateLimitPolicy
+
+
+ |
+
+(Optional)
+ Global defines global rate limiting parameters, i.e. parameters
+defining descriptors that are sent to an external rate limit
+service (RLS) for a rate limit decision on each request.
+ |
+
+
+
+RedirectResponseCode
+(uint32
alias)
+
+(Appears on:
+HTTPInternalRedirectPolicy)
+
+
+
RedirectResponseCode is a uint32 type alias with validation to ensure that the value is valid.
+
+RemoteAddressDescriptor
+
+
+(Appears on:
+RateLimitDescriptorEntry)
+
+
+
RemoteAddressDescriptor defines a descriptor entry with a key of
+“remote_address” and a value equal to the client’s IP address
+(from x-forwarded-for).
+
+RemoteJWKS
+
+
+(Appears on:
+JWTProvider)
+
+
+
RemoteJWKS defines how to fetch a JWKS from an HTTP endpoint.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+uri
+
+
+string
+
+ |
+
+ The URI for the JWKS.
+ |
+
+
+
+validation
+
+
+
+UpstreamValidation
+
+
+ |
+
+(Optional)
+ UpstreamValidation defines how to verify the JWKS’s TLS certificate.
+ |
+
+
+
+timeout
+
+
+string
+
+ |
+
+(Optional)
+ How long to wait for a response from the URI.
+If not specified, a default of 1s applies.
+ |
+
+
+
+cacheDuration
+
+
+string
+
+ |
+
+(Optional)
+ How long to cache the JWKS locally. If not specified,
+Envoy’s default of 5m applies.
+ |
+
+
+
+dnsLookupFamily
+
+
+string
+
+ |
+
+(Optional)
+ The DNS IP address resolution policy for the JWKS URI.
+When configured as “v4”, the DNS resolver will only perform a lookup
+for addresses in the IPv4 family. If “v6” is configured, the DNS resolver
+will only perform a lookup for addresses in the IPv6 family.
+If “all” is configured, the DNS resolver
+will perform a lookup for addresses in both the IPv4 and IPv6 family.
+If “auto” is configured, the DNS resolver will first perform a lookup
+for addresses in the IPv6 family and fallback to a lookup for addresses
+in the IPv4 family. If not specified, the Contour-wide setting defined
+in the config file or ContourConfiguration applies (defaults to “auto”).
+See https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto.html#envoy-v3-api-enum-config-cluster-v3-cluster-dnslookupfamily
+for more information.
+ |
+
+
+
+ReplacePrefix
+
+
+(Appears on:
+PathRewritePolicy)
+
+
+
ReplacePrefix describes a path prefix replacement.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+prefix
+
+
+string
+
+ |
+
+(Optional)
+ Prefix specifies the URL path prefix to be replaced.
+If Prefix is specified, it must exactly match the MatchCondition
+prefix that is rendered by the chain of including HTTPProxies
+and only that path prefix will be replaced by Replacement.
+This allows HTTPProxies that are included through multiple
+roots to only replace specific path prefixes, leaving others
+unmodified.
+If Prefix is not specified, all routing prefixes rendered
+by the include chain will be replaced.
+ |
+
+
+
+replacement
+
+
+string
+
+ |
+
+ Replacement is the string that the routing path prefix
+will be replaced with. This must not be empty.
+ |
+
+
+
+RequestHashPolicy
+
+
+(Appears on:
+LoadBalancerPolicy)
+
+
+
RequestHashPolicy contains configuration for an individual hash policy
+on a request attribute.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+terminal
+
+
+bool
+
+ |
+
+ Terminal is a flag that allows for short-circuiting computing of a hash
+for a given request. If set to true, and the request attribute specified
+in the attribute hash options is present, no further hash policies will
+be used to calculate a hash for the request.
+ |
+
+
+
+headerHashOptions
+
+
+
+HeaderHashOptions
+
+
+ |
+
+(Optional)
+ HeaderHashOptions should be set when request header hash based load
+balancing is desired. It must be the only hash option field set,
+otherwise this request hash policy object will be ignored.
+ |
+
+
+
+queryParameterHashOptions
+
+
+
+QueryParameterHashOptions
+
+
+ |
+
+(Optional)
+ QueryParameterHashOptions should be set when request query parameter hash based load
+balancing is desired. It must be the only hash option field set,
+otherwise this request hash policy object will be ignored.
+ |
+
+
+
+hashSourceIP
+
+
+bool
+
+ |
+
+(Optional)
+ HashSourceIP should be set to true when request source IP hash based
+load balancing is desired. It must be the only hash option field set,
+otherwise this request hash policy object will be ignored.
+ |
+
+
+
+
+
+(Appears on:
+RateLimitDescriptorEntry)
+
+
+
RequestHeaderDescriptor defines a descriptor entry that’s populated only
+if a given header is present on the request. The value of the descriptor
+entry is equal to the value of the header (if present).
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+headerName
+
+
+string
+
+ |
+
+ HeaderName defines the name of the header to look for on the request.
+ |
+
+
+
+descriptorKey
+
+
+string
+
+ |
+
+ DescriptorKey defines the key to use on the descriptor entry.
+ |
+
+
+
+
+
+(Appears on:
+RateLimitDescriptorEntry)
+
+
+
RequestHeaderValueMatchDescriptor defines a descriptor entry that’s populated
+if the request’s headers match a set of 1+ match criteria. The descriptor key
+is “header_match”, and the descriptor value is statically defined.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+headers
+
+
+
+[]HeaderMatchCondition
+
+
+ |
+
+ Headers is a list of 1+ match criteria to apply against the request
+to determine whether to populate the descriptor entry or not.
+ |
+
+
+
+expectMatch
+
+
+bool
+
+ |
+
+ ExpectMatch defines whether the request must positively match the match
+criteria in order to generate a descriptor entry (i.e. true), or not
+match the match criteria in order to generate a descriptor entry (i.e. false).
+The default is true.
+ |
+
+
+
+value
+
+
+string
+
+ |
+
+ Value defines the value of the descriptor entry.
+ |
+
+
+
+RetryOn
+(string
alias)
+
+(Appears on:
+RetryPolicy)
+
+
+
RetryOn is a string type alias with validation to ensure that the value is valid.
+
+RetryPolicy
+
+
+(Appears on:
+Route)
+
+
+
RetryPolicy defines the attributes associated with retrying policy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+count
+
+
+int64
+
+ |
+
+(Optional)
+ NumRetries is maximum allowed number of retries.
+If set to -1, then retries are disabled.
+If set to 0 or not supplied, the value is set
+to the Envoy default of 1.
+ |
+
+
+
+perTryTimeout
+
+
+string
+
+ |
+
+(Optional)
+ PerTryTimeout specifies the timeout per retry attempt.
+Ignored if NumRetries is not supplied.
+ |
+
+
+
+retryOn
+
+
+
+[]RetryOn
+
+
+ |
+
+(Optional)
+ RetryOn specifies the conditions on which to retry a request.
+Supported HTTP conditions:
+
+5xx
+gateway-error
+reset
+connect-failure
+retriable-4xx
+refused-stream
+retriable-status-codes
+retriable-headers
+
+Supported gRPC conditions:
+
+cancelled
+deadline-exceeded
+internal
+resource-exhausted
+unavailable
+
+ |
+
+
+
+retriableStatusCodes
+
+
+[]uint32
+
+ |
+
+(Optional)
+ RetriableStatusCodes specifies the HTTP status codes that should be retried.
+This field is only respected when you include retriable-status-codes in the RetryOn field.
+ |
+
+
+
+Route
+
+
+(Appears on:
+HTTPProxySpec)
+
+
+
Route contains the set of routes for a virtual host.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+
+[]MatchCondition
+
+
+ |
+
+(Optional)
+ Conditions are a set of rules that are applied to a Route.
+When applied, they are merged using AND, with one exception:
+There can be only one Prefix, Exact or Regex MatchCondition
+per Conditions slice. More than one of these condition types,
+or contradictory Conditions, will make the route invalid.
+ |
+
+
+
+services
+
+
+
+[]Service
+
+
+ |
+
+(Optional)
+ Services are the services to proxy traffic.
+ |
+
+
+
+enableWebsockets
+
+
+bool
+
+ |
+
+(Optional)
+ Enables websocket support for the route.
+ |
+
+
+
+permitInsecure
+
+
+bool
+
+ |
+
+(Optional)
+ Allow this path to respond to insecure requests over HTTP which are normally
+not permitted when a virtualhost.tls block is present.
+ |
+
+
+
+authPolicy
+
+
+
+AuthorizationPolicy
+
+
+ |
+
+(Optional)
+ AuthPolicy updates the authorization policy that was set
+on the root HTTPProxy object for client requests that
+match this route.
+ |
+
+
+
+timeoutPolicy
+
+
+
+TimeoutPolicy
+
+
+ |
+
+(Optional)
+ The timeout policy for this route.
+ |
+
+
+
+retryPolicy
+
+
+
+RetryPolicy
+
+
+ |
+
+(Optional)
+ The retry policy for this route.
+ |
+
+
+
+healthCheckPolicy
+
+
+
+HTTPHealthCheckPolicy
+
+
+ |
+
+(Optional)
+ The health check policy for this route.
+ |
+
+
+
+loadBalancerPolicy
+
+
+
+LoadBalancerPolicy
+
+
+ |
+
+(Optional)
+ The load balancing policy for this route.
+ |
+
+
+
+pathRewritePolicy
+
+
+
+PathRewritePolicy
+
+
+ |
+
+(Optional)
+ The policy for rewriting the path of the request URL
+after the request has been routed to a Service.
+ |
+
+
+
+requestHeadersPolicy
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ The policy for managing request headers during proxying.
+You may dynamically rewrite the Host header to be forwarded
+upstream to the content of a request header using
+the below format “%REQ(X-Header-Name)%”. If the value of the header
+is empty, it is ignored.
+*NOTE: Pay attention to the potential security implications of using this option.
+Provided header must come from trusted source.
+**NOTE: The header rewrite is only done while forwarding and has no bearing
+on the routing decision.
+ |
+
+
+
+responseHeadersPolicy
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ The policy for managing response headers during proxying.
+Rewriting the ‘Host’ header is not supported.
+ |
+
+
+
+cookieRewritePolicies
+
+
+
+[]CookieRewritePolicy
+
+
+ |
+
+(Optional)
+ The policies for rewriting Set-Cookie header attributes. Note that
+rewritten cookie names must be unique in this list. Order rewrite
+policies are specified in does not matter.
+ |
+
+
+
+rateLimitPolicy
+
+
+
+RateLimitPolicy
+
+
+ |
+
+(Optional)
+ The policy for rate limiting on the route.
+ |
+
+
+
+requestRedirectPolicy
+
+
+
+HTTPRequestRedirectPolicy
+
+
+ |
+
+(Optional)
+ RequestRedirectPolicy defines an HTTP redirection.
+ |
+
+
+
+directResponsePolicy
+
+
+
+HTTPDirectResponsePolicy
+
+
+ |
+
+(Optional)
+ DirectResponsePolicy returns an arbitrary HTTP response directly.
+ |
+
+
+
+internalRedirectPolicy
+
+
+
+HTTPInternalRedirectPolicy
+
+
+ |
+
+(Optional)
+ The policy to define when to handle redirects responses internally.
+ |
+
+
+
+jwtVerificationPolicy
+
+
+
+JWTVerificationPolicy
+
+
+ |
+
+(Optional)
+ The policy for verifying JWTs for requests to this route.
+ |
+
+
+
+ipAllowPolicy
+
+
+
+[]IPFilterPolicy
+
+
+ |
+
+ IPAllowFilterPolicy is a list of ipv4/6 filter rules for which matching
+requests should be allowed. All other requests will be denied.
+Only one of IPAllowFilterPolicy and IPDenyFilterPolicy can be defined.
+The rules defined here override any rules set on the root HTTPProxy.
+ |
+
+
+
+ipDenyPolicy
+
+
+
+[]IPFilterPolicy
+
+
+ |
+
+ IPDenyFilterPolicy is a list of ipv4/6 filter rules for which matching
+requests should be denied. All other requests will be allowed.
+Only one of IPAllowFilterPolicy and IPDenyFilterPolicy can be defined.
+The rules defined here override any rules set on the root HTTPProxy.
+ |
+
+
+
+Service
+
+
+(Appears on:
+Route,
+TCPProxy)
+
+
+
Service defines an Kubernetes Service to proxy traffic.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name is the name of Kubernetes service to proxy traffic.
+Names defined here will be used to look up corresponding endpoints which contain the ips to route.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+ Port (defined as Integer) to proxy traffic to since a service can have multiple defined.
+ |
+
+
+
+healthPort
+
+
+int
+
+ |
+
+(Optional)
+ HealthPort is the port for this service healthcheck.
+If not specified, Port is used for service healthchecks.
+ |
+
+
+
+protocol
+
+
+string
+
+ |
+
+(Optional)
+ Protocol may be used to specify (or override) the protocol used to reach this Service.
+Values may be tls, h2, h2c. If omitted, protocol-selection falls back on Service annotations.
+ |
+
+
+
+weight
+
+
+int64
+
+ |
+
+(Optional)
+ Weight defines percentage of traffic to balance traffic
+ |
+
+
+
+validation
+
+
+
+UpstreamValidation
+
+
+ |
+
+(Optional)
+ UpstreamValidation defines how to verify the backend service’s certificate
+ |
+
+
+
+mirror
+
+
+bool
+
+ |
+
+ If Mirror is true the Service will receive a read only mirror of the traffic for this route.
+If Mirror is true, then fractional mirroring can be enabled by optionally setting the Weight
+field. Legal values for Weight are 1-100. Omitting the Weight field will result in 100% mirroring.
+NOTE: Setting Weight explicitly to 0 will unexpectedly result in 100% traffic mirroring. This
+occurs since we cannot distinguish omitted fields from those explicitly set to their default
+values
+ |
+
+
+
+requestHeadersPolicy
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ The policy for managing request headers during proxying.
+ |
+
+
+
+responseHeadersPolicy
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ The policy for managing response headers during proxying.
+Rewriting the ‘Host’ header is not supported.
+ |
+
+
+
+cookieRewritePolicies
+
+
+
+[]CookieRewritePolicy
+
+
+ |
+
+(Optional)
+ The policies for rewriting Set-Cookie header attributes.
+ |
+
+
+
+slowStartPolicy
+
+
+
+SlowStartPolicy
+
+
+ |
+
+(Optional)
+ Slow start will gradually increase amount of traffic to a newly added endpoint.
+ |
+
+
+
+SlowStartPolicy
+
+
+(Appears on:
+Service)
+
+
+
SlowStartPolicy will gradually increase amount of traffic to a newly added endpoint.
+It can be used only with RoundRobin and WeightedLeastRequest load balancing strategies.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+window
+
+
+string
+
+ |
+
+ The duration of slow start window.
+Duration is expressed in the Go Duration format.
+Valid time units are “ns”, “us” (or “µs”), “ms”, “s”, “m”, “h”.
+ |
+
+
+
+aggression
+
+
+string
+
+ |
+
+(Optional)
+ The speed of traffic increase over the slow start window.
+Defaults to 1.0, so that endpoint would get linearly increasing amount of traffic.
+When increasing the value for this parameter, the speed of traffic ramp-up increases non-linearly.
+The value of aggression parameter should be greater than 0.0.
+More info: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/slow_start
+ |
+
+
+
+minWeightPercent
+
+
+uint32
+
+ |
+
+(Optional)
+ The minimum or starting percentage of traffic to send to new endpoints.
+A non-zero value helps avoid a too small initial weight, which may cause endpoints in slow start mode to receive no traffic in the beginning of the slow start window.
+If not specified, the default is 10%.
+ |
+
+
+
+SubCondition
+
+
+(Appears on:
+DetailedCondition)
+
+
+
SubCondition is a Condition-like type intended for use as a subcondition inside a DetailedCondition.
+It contains a subset of the Condition fields.
+It is intended for warnings and errors, so type
names should use abnormal-true polarity,
+that is, they should be of the form “ErrorPresent: true”.
+The expected lifecycle for these errors is that they should only be present when the error or warning is,
+and should be removed when they are not relevant.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+string
+
+ |
+
+ Type of condition in CamelCase or in foo.example.com/CamelCase .
+This must be in abnormal-true polarity, that is, ErrorFound or controller.io/ErrorFound .
+The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ |
+
+
+
+status
+
+
+
+Kubernetes meta/v1.ConditionStatus
+
+
+ |
+
+ Status of the condition, one of True, False, Unknown.
+ |
+
+
+
+reason
+
+
+string
+
+ |
+
+ Reason contains a programmatic identifier indicating the reason for the condition’s last transition.
+Producers of specific condition types may define expected values and meanings for this field,
+and whether the values are considered a guaranteed API.
+The value should be a CamelCase string.
+This field may not be empty.
+ |
+
+
+
+message
+
+
+string
+
+ |
+
+ Message is a human readable message indicating details about the transition.
+This may be an empty string.
+ |
+
+
+
+TCPHealthCheckPolicy
+
+
+(Appears on:
+TCPProxy)
+
+
+
TCPHealthCheckPolicy defines health checks on the upstream service.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+intervalSeconds
+
+
+int64
+
+ |
+
+(Optional)
+ The interval (seconds) between health checks
+ |
+
+
+
+timeoutSeconds
+
+
+int64
+
+ |
+
+(Optional)
+ The time to wait (seconds) for a health check response
+ |
+
+
+
+unhealthyThresholdCount
+
+
+uint32
+
+ |
+
+(Optional)
+ The number of unhealthy health checks required before a host is marked unhealthy
+ |
+
+
+
+healthyThresholdCount
+
+
+uint32
+
+ |
+
+(Optional)
+ The number of healthy health checks required before a host is marked healthy
+ |
+
+
+
+TCPProxy
+
+
+(Appears on:
+HTTPProxySpec)
+
+
+
TCPProxy contains the set of services to proxy TCP connections.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+loadBalancerPolicy
+
+
+
+LoadBalancerPolicy
+
+
+ |
+
+(Optional)
+ The load balancing policy for the backend services. Note that the
+Cookie and RequestHash load balancing strategies cannot be used
+here.
+ |
+
+
+
+services
+
+
+
+[]Service
+
+
+ |
+
+(Optional)
+ Services are the services to proxy traffic
+ |
+
+
+
+include
+
+
+
+TCPProxyInclude
+
+
+ |
+
+(Optional)
+ Include specifies that this tcpproxy should be delegated to another HTTPProxy.
+ |
+
+
+
+includes
+
+
+
+TCPProxyInclude
+
+
+ |
+
+(Optional)
+ IncludesDeprecated allow for specific routing configuration to be appended to another HTTPProxy in another namespace.
+Exists due to a mistake when developing HTTPProxy and the field was marked plural
+when it should have been singular. This field should stay to not break backwards compatibility to v1 users.
+ |
+
+
+
+healthCheckPolicy
+
+
+
+TCPHealthCheckPolicy
+
+
+ |
+
+(Optional)
+ The health check policy for this tcp proxy
+ |
+
+
+
+TCPProxyInclude
+
+
+(Appears on:
+TCPProxy)
+
+
+
TCPProxyInclude describes a target HTTPProxy document which contains the TCPProxy details.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name of the child HTTPProxy
+ |
+
+
+
+namespace
+
+
+string
+
+ |
+
+(Optional)
+ Namespace of the HTTPProxy to include. Defaults to the current namespace if not supplied.
+ |
+
+
+
+TLS
+
+
+(Appears on:
+VirtualHost)
+
+
+
TLS describes tls properties. The SNI names that will be matched on
+are described in the HTTPProxy’s Spec.VirtualHost.Fqdn field.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+secretName
+
+
+string
+
+ |
+
+ SecretName is the name of a TLS secret.
+Either SecretName or Passthrough must be specified, but not both.
+If specified, the named secret must contain a matching certificate
+for the virtual host’s FQDN.
+The name can be optionally prefixed with namespace “namespace/name”.
+When cross-namespace reference is used, TLSCertificateDelegation resource must exist in the namespace to grant access to the secret.
+ |
+
+
+
+minimumProtocolVersion
+
+
+string
+
+ |
+
+(Optional)
+ MinimumProtocolVersion is the minimum TLS version this vhost should
+negotiate. Valid options are 1.2 (default) and 1.3 . Any other value
+defaults to TLS 1.2.
+ |
+
+
+
+maximumProtocolVersion
+
+
+string
+
+ |
+
+(Optional)
+ MaximumProtocolVersion is the maximum TLS version this vhost should
+negotiate. Valid options are 1.2 and 1.3 (default). Any other value
+defaults to TLS 1.3.
+ |
+
+
+
+passthrough
+
+
+bool
+
+ |
+
+(Optional)
+ Passthrough defines whether the encrypted TLS handshake will be
+passed through to the backing cluster. Either Passthrough or
+SecretName must be specified, but not both.
+ |
+
+
+
+clientValidation
+
+
+
+DownstreamValidation
+
+
+ |
+
+(Optional)
+ ClientValidation defines how to verify the client certificate
+when an external client establishes a TLS connection to Envoy.
+This setting:
+
+- Enables TLS client certificate validation.
+- Specifies how the client certificate will be validated (i.e.
+validation required or skipped).
+
+Note: Setting client certificate validation to be skipped should
+be only used in conjunction with an external authorization server that
+performs client validation as Contour will ensure client certificates
+are passed along.
+ |
+
+
+
+enableFallbackCertificate
+
+
+bool
+
+ |
+
+ EnableFallbackCertificate defines if the vhost should allow a default certificate to
+be applied which handles all requests which don’t match the SNI defined in this vhost.
+ |
+
+
+
+TLSCertificateDelegationSpec
+
+
+(Appears on:
+TLSCertificateDelegation)
+
+
+
TLSCertificateDelegationSpec defines the spec of the CRD
+
+
+TLSCertificateDelegationStatus
+
+
+(Appears on:
+TLSCertificateDelegation)
+
+
+
TLSCertificateDelegationStatus allows for the status of the delegation
+to be presented to the user.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+
+[]DetailedCondition
+
+
+ |
+
+(Optional)
+ Conditions contains information about the current status of the HTTPProxy,
+in an upstream-friendly container.
+Contour will update a single condition, Valid , that is in normal-true polarity.
+That is, when currentStatus is valid , the Valid condition will be status: true ,
+and vice versa.
+Contour will leave untouched any other Conditions set in this block,
+in case some other controller wants to add a Condition.
+If you are another controller owner and wish to add a condition, you should
+namespace your condition with a label, like controller.domain.com/ConditionName .
+ |
+
+
+
+TimeoutPolicy
+
+
+(Appears on:
+Route,
+ExtensionServiceSpec)
+
+
+
TimeoutPolicy configures timeouts that are used for handling network requests.
+TimeoutPolicy durations are expressed in the Go Duration format.
+Valid time units are “ns”, “us” (or “µs”), “ms”, “s”, “m”, “h”.
+The string “infinity” is also a valid input and specifies no timeout.
+A value of “0s” will be treated as if the field were not set, i.e. by using Envoy’s default behavior.
+Example input values: “300ms”, “5s”, “1m”.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+response
+
+
+string
+
+ |
+
+(Optional)
+ Timeout for receiving a response from the server after processing a request from client.
+If not supplied, Envoy’s default value of 15s applies.
+ |
+
+
+
+idle
+
+
+string
+
+ |
+
+(Optional)
+ Timeout for how long the proxy should wait while there is no activity during single request/response (for HTTP/1.1) or stream (for HTTP/2).
+Timeout will not trigger while HTTP/1.1 connection is idle between two consecutive requests.
+If not specified, there is no per-route idle timeout, though a connection manager-wide
+stream_idle_timeout default of 5m still applies.
+ |
+
+
+
+idleConnection
+
+
+string
+
+ |
+
+(Optional)
+ Timeout for how long connection from the proxy to the upstream service is kept when there are no active requests.
+If not supplied, Envoy’s default value of 1h applies.
+ |
+
+
+
+UpstreamValidation
+
+
+(Appears on:
+RemoteJWKS,
+Service,
+ExtensionServiceSpec)
+
+
+
UpstreamValidation defines how to verify the backend service’s certificate
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+caSecret
+
+
+string
+
+ |
+
+ Name or namespaced name of the Kubernetes secret used to validate the certificate presented by the backend.
+The secret must contain key named ca.crt.
+The name can be optionally prefixed with namespace “namespace/name”.
+When cross-namespace reference is used, TLSCertificateDelegation resource must exist in the namespace to grant access to the secret.
+Max length should be the actual max possible length of a namespaced name (63 + 253 + 1 = 317)
+ |
+
+
+
+subjectName
+
+
+string
+
+ |
+
+ Key which is expected to be present in the ‘subjectAltName’ of the presented certificate.
+Deprecated: migrate to using the plural field subjectNames.
+ |
+
+
+
+subjectNames
+
+
+[]string
+
+ |
+
+(Optional)
+List of keys, of which at least one is expected to be present in the ‘subjectAltName’ of the
+presented certificate.
+ |
+
+
+
+VirtualHost
+
+
+(Appears on:
+HTTPProxySpec)
+
+
+
VirtualHost appears at most once. If it is present, the object is considered
+to be a “root”.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+fqdn
+
+
+string
+
+ |
+
+ The fully qualified domain name of the root of the ingress tree
+all leaves of the DAG rooted at this object relate to the fqdn.
+ |
+
+
+
+tls
+
+
+
+TLS
+
+
+ |
+
+(Optional)
+ If present the fields describes TLS properties of the virtual
+host. The SNI names that will be matched on are described in fqdn,
+the tls.secretName secret must contain a certificate that itself
+contains a name that matches the FQDN.
+ |
+
+
+
+authorization
+
+
+
+AuthorizationServer
+
+
+ |
+
+(Optional)
+ This field configures an extension service to perform
+authorization for this virtual host. Authorization can
+only be configured on virtual hosts that have TLS enabled.
+If the TLS configuration requires client certificate
+validation, the client certificate is always included in the
+authentication check request.
+ |
+
+
+
+corsPolicy
+
+
+
+CORSPolicy
+
+
+ |
+
+(Optional)
+ Specifies the cross-origin policy to apply to the VirtualHost.
+ |
+
+
+
+rateLimitPolicy
+
+
+
+RateLimitPolicy
+
+
+ |
+
+(Optional)
+ The policy for rate limiting on the virtual host.
+ |
+
+
+
+jwtProviders
+
+
+
+[]JWTProvider
+
+
+ |
+
+(Optional)
+ Providers to use for verifying JSON Web Tokens (JWTs) on the virtual host.
+ |
+
+
+
+ipAllowPolicy
+
+
+
+[]IPFilterPolicy
+
+
+ |
+
+ IPAllowFilterPolicy is a list of ipv4/6 filter rules for which matching
+requests should be allowed. All other requests will be denied.
+Only one of IPAllowFilterPolicy and IPDenyFilterPolicy can be defined.
+The rules defined here may be overridden in a Route.
+ |
+
+
+
+ipDenyPolicy
+
+
+
+[]IPFilterPolicy
+
+
+ |
+
+ IPDenyFilterPolicy is a list of ipv4/6 filter rules for which matching
+requests should be denied. All other requests will be allowed.
+Only one of IPAllowFilterPolicy and IPDenyFilterPolicy can be defined.
+The rules defined here may be overridden in a Route.
+ |
+
+
+
+
+projectcontour.io/v1alpha1
+
+
Package v1alpha1 contains API Schema definitions for the projectcontour.io v1alpha1 API group
+
+Resource Types:
+
+ContourConfiguration
+
+
+
ContourConfiguration is the schema for a Contour instance.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+projectcontour.io/v1alpha1
+
+ |
+
+
+
+kind
+string
+ |
+ContourConfiguration |
+
+
+
+metadata
+
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+
+ContourConfigurationSpec
+
+
+ |
+
+
+
+
+
+
+xdsServer
+
+
+
+XDSServerConfig
+
+
+ |
+
+(Optional)
+ XDSServer contains parameters for the xDS server.
+ |
+
+
+
+ingress
+
+
+
+IngressConfig
+
+
+ |
+
+(Optional)
+ Ingress contains parameters for ingress options.
+ |
+
+
+
+debug
+
+
+
+DebugConfig
+
+
+ |
+
+(Optional)
+ Debug contains parameters to enable debug logging
+and debug interfaces inside Contour.
+ |
+
+
+
+health
+
+
+
+HealthConfig
+
+
+ |
+
+(Optional)
+ Health defines the endpoints Contour uses to serve health checks.
+Contour’s default is { address: “0.0.0.0”, port: 8000 }.
+ |
+
+
+
+envoy
+
+
+
+EnvoyConfig
+
+
+ |
+
+(Optional)
+ Envoy contains parameters for Envoy as well
+as how to optionally configure a managed Envoy fleet.
+ |
+
+
+
+gateway
+
+
+
+GatewayConfig
+
+
+ |
+
+(Optional)
+ Gateway contains parameters for the gateway-api Gateway that Contour
+is configured to serve traffic.
+ |
+
+
+
+httpproxy
+
+
+
+HTTPProxyConfig
+
+
+ |
+
+(Optional)
+ HTTPProxy defines parameters on HTTPProxy.
+ |
+
+
+
+enableExternalNameService
+
+
+bool
+
+ |
+
+(Optional)
+ EnableExternalNameService allows processing of ExternalNameServices
+Contour’s default is false for security reasons.
+ |
+
+
+
+globalExtAuth
+
+
+
+AuthorizationServer
+
+
+ |
+
+(Optional)
+ GlobalExternalAuthorization allows envoys external authorization filter
+to be enabled for all virtual hosts.
+ |
+
+
+
+rateLimitService
+
+
+
+RateLimitServiceConfig
+
+
+ |
+
+(Optional)
+ RateLimitService optionally holds properties of the Rate Limit Service
+to be used for global rate limiting.
+ |
+
+
+
+policy
+
+
+
+PolicyConfig
+
+
+ |
+
+(Optional)
+ Policy specifies default policy applied if not overridden by the user
+ |
+
+
+
+metrics
+
+
+
+MetricsConfig
+
+
+ |
+
+(Optional)
+ Metrics defines the endpoint Contour uses to serve metrics.
+Contour’s default is { address: “0.0.0.0”, port: 8000 }.
+ |
+
+
+
+tracing
+
+
+
+TracingConfig
+
+
+ |
+
+ Tracing defines properties for exporting trace data to OpenTelemetry.
+ |
+
+
+
+featureFlags
+
+
+
+FeatureFlags
+
+
+ |
+
+ FeatureFlags defines toggle to enable new contour features.
+Available toggles are:
+useEndpointSlices - Configures contour to fetch endpoint data
+from k8s endpoint slices. defaults to true,
+If false then reads endpoint data from the k8s endpoints.
+ |
+
+
+ |
+
+
+
+status
+
+
+
+ContourConfigurationStatus
+
+
+ |
+
+(Optional)
+ |
+
+
+
+ContourDeployment
+
+
+
ContourDeployment is the schema for a Contour Deployment.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+projectcontour.io/v1alpha1
+
+ |
+
+
+
+kind
+string
+ |
+ContourDeployment |
+
+
+
+metadata
+
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+
+ContourDeploymentSpec
+
+
+ |
+
+
+
+
+
+
+contour
+
+
+
+ContourSettings
+
+
+ |
+
+(Optional)
+ Contour specifies deployment-time settings for the Contour
+part of the installation, i.e. the xDS server/control plane
+and associated resources, including things like replica count
+for the Deployment, and node placement constraints for the pods.
+ |
+
+
+
+envoy
+
+
+
+EnvoySettings
+
+
+ |
+
+(Optional)
+ Envoy specifies deployment-time settings for the Envoy
+part of the installation, i.e. the xDS client/data plane
+and associated resources, including things like the workload
+type to use (DaemonSet or Deployment), node placement constraints
+for the pods, and various options for the Envoy service.
+ |
+
+
+
+runtimeSettings
+
+
+
+ContourConfigurationSpec
+
+
+ |
+
+(Optional)
+ RuntimeSettings is a ContourConfiguration spec to be used when
+provisioning a Contour instance that will influence aspects of
+the Contour instance’s runtime behavior.
+ |
+
+
+
+resourceLabels
+
+
+map[string]string
+
+ |
+
+(Optional)
+ ResourceLabels is a set of labels to add to the provisioned Contour resources.
+Deprecated: use Gateway.Spec.Infrastructure.Labels instead. This field will be
+removed in a future release.
+ |
+
+
+ |
+
+
+
+status
+
+
+
+ContourDeploymentStatus
+
+
+ |
+
+ |
+
+
+
+ExtensionService
+
+
+
ExtensionService is the schema for the Contour extension services API.
+An ExtensionService resource binds a network service to the Contour
+API so that Contour API features can be implemented by collaborating
+components.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+apiVersion
+string |
+
+
+projectcontour.io/v1alpha1
+
+ |
+
+
+
+kind
+string
+ |
+ExtensionService |
+
+
+
+metadata
+
+
+
+Kubernetes meta/v1.ObjectMeta
+
+
+ |
+
+Refer to the Kubernetes API documentation for the fields of the
+metadata field.
+ |
+
+
+
+spec
+
+
+
+ExtensionServiceSpec
+
+
+ |
+
+
+
+
+
+
+services
+
+
+
+[]ExtensionServiceTarget
+
+
+ |
+
+ Services specifies the set of Kubernetes Service resources that
+receive GRPC extension API requests.
+If no weights are specified for any of the entries in
+this array, traffic will be spread evenly across all the
+services.
+Otherwise, traffic is balanced proportionally to the
+Weight field in each entry.
+ |
+
+
+
+validation
+
+
+
+UpstreamValidation
+
+
+ |
+
+(Optional)
+ UpstreamValidation defines how to verify the backend service’s certificate
+ |
+
+
+
+protocol
+
+
+string
+
+ |
+
+(Optional)
+ Protocol may be used to specify (or override) the protocol used to reach this Service.
+Values may be h2 or h2c. If omitted, protocol-selection falls back on Service annotations.
+ |
+
+
+
+loadBalancerPolicy
+
+
+
+LoadBalancerPolicy
+
+
+ |
+
+(Optional)
+ The policy for load balancing GRPC service requests. Note that the
+Cookie and RequestHash load balancing strategies cannot be used
+here.
+ |
+
+
+
+timeoutPolicy
+
+
+
+TimeoutPolicy
+
+
+ |
+
+(Optional)
+ The timeout policy for requests to the services.
+ |
+
+
+
+protocolVersion
+
+
+
+ExtensionProtocolVersion
+
+
+ |
+
+(Optional)
+ This field sets the version of the GRPC protocol that Envoy uses to
+send requests to the extension service. Since Contour always uses the
+v3 Envoy API, this is currently fixed at “v3”. However, other
+protocol options will be available in future.
+ |
+
+
+ |
+
+
+
+status
+
+
+
+ExtensionServiceStatus
+
+
+ |
+
+ |
+
+
+
+
+
+
+AccessLogJSONFields
+([]string
alias)
+
+(Appears on:
+EnvoyLogging)
+
+
+
+AccessLogLevel
+(string
alias)
+
+(Appears on:
+EnvoyLogging)
+
+
+
+
+
+
+Value |
+Description |
+
+
+"critical" |
+Log only requests that result in a server error (i.e. 500+) response code.
+ |
+
"disabled" |
+Disable the access log.
+ |
+
"error" |
+Log only requests that result in a non-success (i.e. 300+) response code
+ |
+
"info" |
+Log all requests. This is the default.
+ |
+
+
+AccessLogType
+(string
alias)
+
+(Appears on:
+EnvoyLogging)
+
+
+
AccessLogType is the name of a supported access logging mechanism.
+
+
+
+
+Value |
+Description |
+
+
+"envoy" |
+DefaultAccessLogType is the default access log format.
+ |
+
"envoy" |
+Set the Envoy access logging to Envoy’s standard format.
+Can be customized using accessLogFormatString .
+ |
+
"json" |
+Set the Envoy access logging to a JSON format.
+Can be customized using jsonFields .
+ |
+
+
+ClusterDNSFamilyType
+(string
alias)
+
+(Appears on:
+ClusterParameters)
+
+
+
ClusterDNSFamilyType is the Ip family to use for resolving DNS
+names in an Envoy cluster config.
+
+
+
+
+Value |
+Description |
+
+
+"all" |
+DNS lookups will attempt both v4 and v6 queries.
+ |
+
"auto" |
+DNS lookups will do a v6 lookup first, followed by a v4 if that fails.
+ |
+
"v4" |
+DNS lookups will only attempt v4 queries.
+ |
+
"v6" |
+DNS lookups will only attempt v6 queries.
+ |
+
+
+ClusterParameters
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
ClusterParameters holds various configurable cluster values.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+dnsLookupFamily
+
+
+
+ClusterDNSFamilyType
+
+
+ |
+
+(Optional)
+ DNSLookupFamily defines how external names are looked up
+When configured as V4, the DNS resolver will only perform a lookup
+for addresses in the IPv4 family. If V6 is configured, the DNS resolver
+will only perform a lookup for addresses in the IPv6 family.
+If AUTO is configured, the DNS resolver will first perform a lookup
+for addresses in the IPv6 family and fallback to a lookup for addresses
+in the IPv4 family. If ALL is specified, the DNS resolver will perform a lookup for
+both IPv4 and IPv6 families, and return all resolved addresses.
+When this is used, Happy Eyeballs will be enabled for upstream connections.
+Refer to Happy Eyeballs Support for more information.
+Note: This only applies to externalName clusters.
+See https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto.html#envoy-v3-api-enum-config-cluster-v3-cluster-dnslookupfamily
+for more information.
+Values: auto (default), v4 , v6 , all .
+Other values will produce an error.
+ |
+
+
+
+maxRequestsPerConnection
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the maximum requests for upstream connections. If not specified, there is no limit.
+see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions
+for more information.
+ |
+
+
+
+per-connection-buffer-limit-bytes
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the soft limit on size of the cluster’s new connection read and write buffers in bytes.
+If unspecified, an implementation defined default is applied (1MiB).
+see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/cluster/v3/cluster.proto#envoy-v3-api-field-config-cluster-v3-cluster-per-connection-buffer-limit-bytes
+for more information.
+ |
+
+
+
+circuitBreakers
+
+
+
+GlobalCircuitBreakerDefaults
+
+
+ |
+
+(Optional)
+ GlobalCircuitBreakerDefaults specifies default circuit breaker budget across all services.
+If defined, this will be used as the default for all services.
+ |
+
+
+
+upstreamTLS
+
+
+
+EnvoyTLS
+
+
+ |
+
+(Optional)
+ UpstreamTLS contains the TLS policy parameters for upstream connections
+ |
+
+
+
+ContourConfigurationSpec
+
+
+(Appears on:
+ContourConfiguration,
+ContourDeploymentSpec)
+
+
+
ContourConfigurationSpec represents a configuration of a Contour controller.
+It contains most of all the options that can be customized, the
+other remaining options being command line flags.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+xdsServer
+
+
+
+XDSServerConfig
+
+
+ |
+
+(Optional)
+ XDSServer contains parameters for the xDS server.
+ |
+
+
+
+ingress
+
+
+
+IngressConfig
+
+
+ |
+
+(Optional)
+ Ingress contains parameters for ingress options.
+ |
+
+
+
+debug
+
+
+
+DebugConfig
+
+
+ |
+
+(Optional)
+ Debug contains parameters to enable debug logging
+and debug interfaces inside Contour.
+ |
+
+
+
+health
+
+
+
+HealthConfig
+
+
+ |
+
+(Optional)
+ Health defines the endpoints Contour uses to serve health checks.
+Contour’s default is { address: “0.0.0.0”, port: 8000 }.
+ |
+
+
+
+envoy
+
+
+
+EnvoyConfig
+
+
+ |
+
+(Optional)
+ Envoy contains parameters for Envoy as well
+as how to optionally configure a managed Envoy fleet.
+ |
+
+
+
+gateway
+
+
+
+GatewayConfig
+
+
+ |
+
+(Optional)
+ Gateway contains parameters for the gateway-api Gateway that Contour
+is configured to serve traffic.
+ |
+
+
+
+httpproxy
+
+
+
+HTTPProxyConfig
+
+
+ |
+
+(Optional)
+ HTTPProxy defines parameters on HTTPProxy.
+ |
+
+
+
+enableExternalNameService
+
+
+bool
+
+ |
+
+(Optional)
+ EnableExternalNameService allows processing of ExternalNameServices
+Contour’s default is false for security reasons.
+ |
+
+
+
+globalExtAuth
+
+
+
+AuthorizationServer
+
+
+ |
+
+(Optional)
+ GlobalExternalAuthorization allows envoys external authorization filter
+to be enabled for all virtual hosts.
+ |
+
+
+
+rateLimitService
+
+
+
+RateLimitServiceConfig
+
+
+ |
+
+(Optional)
+ RateLimitService optionally holds properties of the Rate Limit Service
+to be used for global rate limiting.
+ |
+
+
+
+policy
+
+
+
+PolicyConfig
+
+
+ |
+
+(Optional)
+ Policy specifies default policy applied if not overridden by the user
+ |
+
+
+
+metrics
+
+
+
+MetricsConfig
+
+
+ |
+
+(Optional)
+ Metrics defines the endpoint Contour uses to serve metrics.
+Contour’s default is { address: “0.0.0.0”, port: 8000 }.
+ |
+
+
+
+tracing
+
+
+
+TracingConfig
+
+
+ |
+
+ Tracing defines properties for exporting trace data to OpenTelemetry.
+ |
+
+
+
+featureFlags
+
+
+
+FeatureFlags
+
+
+ |
+
+ FeatureFlags defines toggle to enable new contour features.
+Available toggles are:
+useEndpointSlices - Configures contour to fetch endpoint data
+from k8s endpoint slices. defaults to true,
+If false then reads endpoint data from the k8s endpoints.
+ |
+
+
+
+ContourConfigurationStatus
+
+
+(Appears on:
+ContourConfiguration)
+
+
+
ContourConfigurationStatus defines the observed state of a ContourConfiguration resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+
+[]DetailedCondition
+
+
+ |
+
+(Optional)
+ Conditions contains the current status of the Contour resource.
+Contour will update a single condition, Valid , that is in normal-true polarity.
+Contour will not modify any other Conditions set in this block,
+in case some other controller wants to add a Condition.
+ |
+
+
+
+ContourDeploymentSpec
+
+
+(Appears on:
+ContourDeployment)
+
+
+
ContourDeploymentSpec specifies options for how a Contour
+instance should be provisioned.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+contour
+
+
+
+ContourSettings
+
+
+ |
+
+(Optional)
+ Contour specifies deployment-time settings for the Contour
+part of the installation, i.e. the xDS server/control plane
+and associated resources, including things like replica count
+for the Deployment, and node placement constraints for the pods.
+ |
+
+
+
+envoy
+
+
+
+EnvoySettings
+
+
+ |
+
+(Optional)
+ Envoy specifies deployment-time settings for the Envoy
+part of the installation, i.e. the xDS client/data plane
+and associated resources, including things like the workload
+type to use (DaemonSet or Deployment), node placement constraints
+for the pods, and various options for the Envoy service.
+ |
+
+
+
+runtimeSettings
+
+
+
+ContourConfigurationSpec
+
+
+ |
+
+(Optional)
+ RuntimeSettings is a ContourConfiguration spec to be used when
+provisioning a Contour instance that will influence aspects of
+the Contour instance’s runtime behavior.
+ |
+
+
+
+resourceLabels
+
+
+map[string]string
+
+ |
+
+(Optional)
+ ResourceLabels is a set of labels to add to the provisioned Contour resources.
+Deprecated: use Gateway.Spec.Infrastructure.Labels instead. This field will be
+removed in a future release.
+ |
+
+
+
+ContourDeploymentStatus
+
+
+(Appears on:
+ContourDeployment)
+
+
+
ContourDeploymentStatus defines the observed state of a ContourDeployment resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+
+[]Kubernetes meta/v1.Condition
+
+
+ |
+
+(Optional)
+ Conditions describe the current conditions of the ContourDeployment resource.
+ |
+
+
+
+ContourSettings
+
+
+(Appears on:
+ContourDeploymentSpec)
+
+
+
ContourSettings contains settings for the Contour part of the installation,
+i.e. the xDS server/control plane and associated resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicas
+
+
+int32
+
+ |
+
+(Optional)
+ Deprecated: Use DeploymentSettings.Replicas instead.
+Replicas is the desired number of Contour replicas. If unset,
+defaults to 2.
+if both DeploymentSettings.Replicas and this one is set, use DeploymentSettings.Replicas .
+ |
+
+
+
+nodePlacement
+
+
+
+NodePlacement
+
+
+ |
+
+(Optional)
+ NodePlacement describes node scheduling configuration of Contour pods.
+ |
+
+
+
+kubernetesLogLevel
+
+
+byte
+
+ |
+
+(Optional)
+ KubernetesLogLevel Enable Kubernetes client debug logging with log level. If unset,
+defaults to 0.
+ |
+
+
+
+logLevel
+
+
+
+LogLevel
+
+
+ |
+
+(Optional)
+ LogLevel sets the log level for Contour
+Allowed values are “info”, “debug”.
+ |
+
+
+
+resources
+
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+(Optional)
+ Compute Resources required by contour container.
+Cannot be updated.
+More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ |
+
+
+
+deployment
+
+
+
+DeploymentSettings
+
+
+ |
+
+(Optional)
+ Deployment describes the settings for running contour as a Deployment .
+ |
+
+
+
+podAnnotations
+
+
+map[string]string
+
+ |
+
+(Optional)
+ PodAnnotations defines annotations to add to the Contour pods.
+the annotations for Prometheus will be appended or overwritten with predefined value.
+ |
+
+
+
+watchNamespaces
+
+
+
+[]Namespace
+
+
+ |
+
+(Optional)
+ WatchNamespaces is an array of namespaces. Setting it will instruct the contour instance
+to only watch this subset of namespaces.
+ |
+
+
+
+disabledFeatures
+
+
+
+[]Feature
+
+
+ |
+
+(Optional)
+ DisabledFeatures defines an array of resources that will be ignored by
+contour reconciler.
+ |
+
+
+
+CustomTag
+
+
+
CustomTag defines custom tags with unique tag name
+to create tags for the active span.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+tagName
+
+
+string
+
+ |
+
+ TagName is the unique name of the custom tag.
+ |
+
+
+
+literal
+
+
+string
+
+ |
+
+(Optional)
+ Literal is a static custom tag value.
+Precisely one of Literal, RequestHeaderName must be set.
+ |
+
+
+
+requestHeaderName
+
+
+string
+
+ |
+
+(Optional)
+ RequestHeaderName indicates which request header
+the label value is obtained from.
+Precisely one of Literal, RequestHeaderName must be set.
+ |
+
+
+
+DaemonSetSettings
+
+
+(Appears on:
+EnvoySettings)
+
+
+
DaemonSetSettings contains settings for DaemonSet resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+updateStrategy
+
+
+
+Kubernetes apps/v1.DaemonSetUpdateStrategy
+
+
+ |
+
+(Optional)
+ Strategy describes the deployment strategy to use to replace existing DaemonSet pods with new pods.
+ |
+
+
+
+DebugConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
DebugConfig contains Contour specific troubleshooting options.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+address
+
+
+string
+
+ |
+
+(Optional)
+ Defines the Contour debug address interface.
+Contour’s default is “127.0.0.1”.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+(Optional)
+ Defines the Contour debug address port.
+Contour’s default is 6060.
+ |
+
+
+
+DeploymentSettings
+
+
+(Appears on:
+ContourSettings,
+EnvoySettings)
+
+
+
DeploymentSettings contains settings for Deployment resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+replicas
+
+
+int32
+
+ |
+
+ Replicas is the desired number of replicas.
+ |
+
+
+
+strategy
+
+
+
+Kubernetes apps/v1.DeploymentStrategy
+
+
+ |
+
+(Optional)
+ Strategy describes the deployment strategy to use to replace existing pods with new pods.
+ |
+
+
+
+EnvoyConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
EnvoyConfig defines how Envoy is to be Configured from Contour.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+listener
+
+
+
+EnvoyListenerConfig
+
+
+ |
+
+(Optional)
+ Listener hold various configurable Envoy listener values.
+ |
+
+
+
+service
+
+
+
+NamespacedName
+
+
+ |
+
+(Optional)
+ Service holds Envoy service parameters for setting Ingress status.
+Contour’s default is { namespace: “projectcontour”, name: “envoy” }.
+ |
+
+
+
+http
+
+
+
+EnvoyListener
+
+
+ |
+
+(Optional)
+ Defines the HTTP Listener for Envoy.
+Contour’s default is { address: “0.0.0.0”, port: 8080, accessLog: “/dev/stdout” }.
+ |
+
+
+
+https
+
+
+
+EnvoyListener
+
+
+ |
+
+(Optional)
+ Defines the HTTPS Listener for Envoy.
+Contour’s default is { address: “0.0.0.0”, port: 8443, accessLog: “/dev/stdout” }.
+ |
+
+
+
+health
+
+
+
+HealthConfig
+
+
+ |
+
+(Optional)
+ Health defines the endpoint Envoy uses to serve health checks.
+Contour’s default is { address: “0.0.0.0”, port: 8002 }.
+ |
+
+
+
+metrics
+
+
+
+MetricsConfig
+
+
+ |
+
+(Optional)
+ Metrics defines the endpoint Envoy uses to serve metrics.
+Contour’s default is { address: “0.0.0.0”, port: 8002 }.
+ |
+
+
+
+clientCertificate
+
+
+
+NamespacedName
+
+
+ |
+
+(Optional)
+ ClientCertificate defines the namespace/name of the Kubernetes
+secret containing the client certificate and private key
+to be used when establishing TLS connection to upstream
+cluster.
+ |
+
+
+
+logging
+
+
+
+EnvoyLogging
+
+
+ |
+
+(Optional)
+ Logging defines how Envoy’s logs can be configured.
+ |
+
+
+
+defaultHTTPVersions
+
+
+
+[]HTTPVersionType
+
+
+ |
+
+(Optional)
+ DefaultHTTPVersions defines the default set of HTTPS
+versions the proxy should accept. HTTP versions are
+strings of the form “HTTP/xx”. Supported versions are
+“HTTP/1.1” and “HTTP/2”.
+Values: HTTP/1.1 , HTTP/2 (default: both).
+Other values will produce an error.
+ |
+
+
+
+timeouts
+
+
+
+TimeoutParameters
+
+
+ |
+
+(Optional)
+ Timeouts holds various configurable timeouts that can
+be set in the config file.
+ |
+
+
+
+cluster
+
+
+
+ClusterParameters
+
+
+ |
+
+(Optional)
+ Cluster holds various configurable Envoy cluster values that can
+be set in the config file.
+ |
+
+
+
+network
+
+
+
+NetworkParameters
+
+
+ |
+
+(Optional)
+ Network holds various configurable Envoy network values.
+ |
+
+
+
+EnvoyListener
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
EnvoyListener defines parameters for an Envoy Listener.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+address
+
+
+string
+
+ |
+
+(Optional)
+ Defines an Envoy Listener Address.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+(Optional)
+ Defines an Envoy listener Port.
+ |
+
+
+
+accessLog
+
+
+string
+
+ |
+
+(Optional)
+ AccessLog defines where Envoy logs are outputted for this listener.
+ |
+
+
+
+EnvoyListenerConfig
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
EnvoyListenerConfig hold various configurable Envoy listener values.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+useProxyProtocol
+
+
+bool
+
+ |
+
+(Optional)
+ Use PROXY protocol for all listeners.
+Contour’s default is false.
+ |
+
+
+
+disableAllowChunkedLength
+
+
+bool
+
+ |
+
+(Optional)
+ DisableAllowChunkedLength disables the RFC-compliant Envoy behavior to
+strip the “Content-Length” header if “Transfer-Encoding: chunked” is
+also set. This is an emergency off-switch to revert back to Envoy’s
+default behavior in case of failures. Please file an issue if failures
+are encountered.
+See: https://github.com/projectcontour/contour/issues/3221
+Contour’s default is false.
+ |
+
+
+
+disableMergeSlashes
+
+
+bool
+
+ |
+
+(Optional)
+ DisableMergeSlashes disables Envoy’s non-standard merge_slashes path transformation option
+which strips duplicate slashes from request URL paths.
+Contour’s default is false.
+ |
+
+
+
+serverHeaderTransformation
+
+
+
+ServerHeaderTransformationType
+
+
+ |
+
+(Optional)
+ Defines the action to be applied to the Server header on the response path.
+When configured as overwrite, overwrites any Server header with “envoy”.
+When configured as append_if_absent, if a Server header is present, pass it through, otherwise set it to “envoy”.
+When configured as pass_through, pass through the value of the Server header, and do not append a header if none is present.
+Values: overwrite (default), append_if_absent , pass_through
+Other values will produce an error.
+Contour’s default is overwrite.
+ |
+
+
+
+connectionBalancer
+
+
+string
+
+ |
+
+(Optional)
+ ConnectionBalancer. If the value is exact, the listener will use the exact connection balancer
+See https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/listener.proto#envoy-api-msg-listener-connectionbalanceconfig
+for more information.
+Values: (empty string): use the default ConnectionBalancer, exact : use the Exact ConnectionBalancer.
+Other values will produce an error.
+ |
+
+
+
+maxRequestsPerConnection
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the maximum requests for downstream connections. If not specified, there is no limit.
+see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions
+for more information.
+ |
+
+
+
+per-connection-buffer-limit-bytes
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the soft limit on size of the listener’s new connection read and write buffers in bytes.
+If unspecified, an implementation defined default is applied (1MiB).
+see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto#envoy-v3-api-field-config-listener-v3-listener-per-connection-buffer-limit-bytes
+for more information.
+ |
+
+
+
+tls
+
+
+
+EnvoyTLS
+
+
+ |
+
+(Optional)
+ TLS holds various configurable Envoy TLS listener values.
+ |
+
+
+
+socketOptions
+
+
+
+SocketOptions
+
+
+ |
+
+(Optional)
+ SocketOptions defines configurable socket options for the listeners.
+Single set of options are applied to all listeners.
+ |
+
+
+
+maxRequestsPerIOCycle
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the limit on number of HTTP requests that Envoy will process from a single
+connection in a single I/O cycle. Requests over this limit are processed in subsequent
+I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is
+detected. Configures the http.max_requests_per_io_cycle Envoy runtime setting. The default
+value when this is not set is no limit.
+ |
+
+
+
+httpMaxConcurrentStreams
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the
+SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed
+for a peer on a single HTTP/2 connection. It is recommended to not set this lower
+than 100 but this field can be used to bound resource usage by HTTP/2 connections
+and mitigate attacks like CVE-2023-44487. The default value when this is not set is
+unlimited.
+ |
+
+
+
+maxConnectionsPerListener
+
+
+uint32
+
+ |
+
+(Optional)
+ Defines the limit on number of active connections to a listener. The limit is applied
+per listener. The default value when this is not set is unlimited.
+ |
+
+
+
+EnvoyLogging
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
EnvoyLogging defines how Envoy’s logs can be configured.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+accessLogFormat
+
+
+
+AccessLogType
+
+
+ |
+
+(Optional)
+ AccessLogFormat sets the global access log format.
+Values: envoy (default), json .
+Other values will produce an error.
+ |
+
+
+
+accessLogFormatString
+
+
+string
+
+ |
+
+(Optional)
+ AccessLogFormatString sets the access log format when format is set to envoy .
+When empty, Envoy’s default format is used.
+ |
+
+
+
+accessLogJSONFields
+
+
+
+AccessLogJSONFields
+
+
+ |
+
+(Optional)
+ AccessLogJSONFields sets the fields that JSON logging will
+output when AccessLogFormat is json.
+ |
+
+
+
+accessLogLevel
+
+
+
+AccessLogLevel
+
+
+ |
+
+(Optional)
+ AccessLogLevel sets the verbosity level of the access log.
+Values: info (default, all requests are logged), error (all non-success requests, i.e. 300+ response code, are logged), critical (all 5xx requests are logged) and disabled .
+Other values will produce an error.
+ |
+
+
+
+EnvoySettings
+
+
+(Appears on:
+ContourDeploymentSpec)
+
+
+
EnvoySettings contains settings for the Envoy part of the installation,
+i.e. the xDS client/data plane and associated resources.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+workloadType
+
+
+
+WorkloadType
+
+
+ |
+
+(Optional)
+ WorkloadType is the type of workload to install Envoy
+as. Choices are DaemonSet and Deployment. If unset, defaults
+to DaemonSet.
+ |
+
+
+
+replicas
+
+
+int32
+
+ |
+
+(Optional)
+ Deprecated: Use DeploymentSettings.Replicas instead.
+Replicas is the desired number of Envoy replicas. If WorkloadType
+is not “Deployment”, this field is ignored. Otherwise, if unset,
+defaults to 2.
+if both DeploymentSettings.Replicas and this one is set, use DeploymentSettings.Replicas .
+ |
+
+
+
+networkPublishing
+
+
+
+NetworkPublishing
+
+
+ |
+
+ NetworkPublishing defines how to expose Envoy to a network.
+ |
+
+
+
+nodePlacement
+
+
+
+NodePlacement
+
+
+ |
+
+(Optional)
+ NodePlacement describes node scheduling configuration of Envoy pods.
+ |
+
+
+
+extraVolumes
+
+
+
+[]Kubernetes core/v1.Volume
+
+
+ |
+
+(Optional)
+ ExtraVolumes holds the extra volumes to add.
+ |
+
+
+
+extraVolumeMounts
+
+
+
+[]Kubernetes core/v1.VolumeMount
+
+
+ |
+
+(Optional)
+ ExtraVolumeMounts holds the extra volume mounts to add (normally used with extraVolumes).
+ |
+
+
+
+podAnnotations
+
+
+map[string]string
+
+ |
+
+(Optional)
+ PodAnnotations defines annotations to add to the Envoy pods.
+the annotations for Prometheus will be appended or overwritten with predefined value.
+ |
+
+
+
+resources
+
+
+
+Kubernetes core/v1.ResourceRequirements
+
+
+ |
+
+(Optional)
+ Compute Resources required by envoy container.
+Cannot be updated.
+More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ |
+
+
+
+logLevel
+
+
+
+LogLevel
+
+
+ |
+
+(Optional)
+ LogLevel sets the log level for Envoy.
+Allowed values are “trace”, “debug”, “info”, “warn”, “error”, “critical”, “off”.
+ |
+
+
+
+daemonSet
+
+
+
+DaemonSetSettings
+
+
+ |
+
+(Optional)
+ DaemonSet describes the settings for running envoy as a DaemonSet .
+if WorkloadType is Deployment, it must be nil
+ |
+
+
+
+deployment
+
+
+
+DeploymentSettings
+
+
+ |
+
+(Optional)
+ Deployment describes the settings for running envoy as a Deployment .
+if WorkloadType is DaemonSet, it must be nil
+ |
+
+
+
+baseID
+
+
+int32
+
+ |
+
+(Optional)
+ The base ID to use when allocating shared memory regions.
+if Envoy needs to be run multiple times on the same machine, each running Envoy will need a unique base ID
+so that the shared memory regions do not conflict.
+defaults to 0.
+ |
+
+
+
+overloadMaxHeapSize
+
+
+uint64
+
+ |
+
+(Optional)
+ OverloadMaxHeapSize defines the maximum heap memory of the envoy controlled by the overload manager.
+When the value is greater than 0, the overload manager is enabled,
+and when Envoy reaches 95% of the maximum heap size, it performs a shrink heap operation.
+When it reaches 98% of the maximum heap size, Envoy will stop accepting requests.
+More info: https://projectcontour.io/docs/main/config/overload-manager/
+ |
+
+
+
+EnvoyTLS
+
+
+(Appears on:
+ClusterParameters,
+EnvoyListenerConfig)
+
+
+
EnvoyTLS describes tls parameters for Envoy listneners.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+minimumProtocolVersion
+
+
+string
+
+ |
+
+(Optional)
+ MinimumProtocolVersion is the minimum TLS version this vhost should
+negotiate.
+Values: 1.2 (default), 1.3 .
+Other values will produce an error.
+ |
+
+
+
+maximumProtocolVersion
+
+
+string
+
+ |
+
+(Optional)
+ MaximumProtocolVersion is the maximum TLS version this vhost should
+negotiate.
+Values: 1.2 , 1.3 (default).
+Other values will produce an error.
+ |
+
+
+
+cipherSuites
+
+
+[]string
+
+ |
+
+(Optional)
+ CipherSuites defines the TLS ciphers to be supported by Envoy TLS
+listeners when negotiating TLS 1.2. Ciphers are validated against the
+set that Envoy supports by default. This parameter should only be used
+by advanced users. Note that these will be ignored when TLS 1.3 is in
+use.
+This field is optional; when it is undefined, a Contour-managed ciphersuite list
+will be used, which may be updated to keep it secure.
+Contour’s default list is:
+- “[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]”
+- “[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]”
+- “ECDHE-ECDSA-AES256-GCM-SHA384”
+- “ECDHE-RSA-AES256-GCM-SHA384”
+Ciphers provided are validated against the following list:
+- “[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]”
+- “[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]”
+- “ECDHE-ECDSA-AES128-GCM-SHA256”
+- “ECDHE-RSA-AES128-GCM-SHA256”
+- “ECDHE-ECDSA-AES128-SHA”
+- “ECDHE-RSA-AES128-SHA”
+- “AES128-GCM-SHA256”
+- “AES128-SHA”
+- “ECDHE-ECDSA-AES256-GCM-SHA384”
+- “ECDHE-RSA-AES256-GCM-SHA384”
+- “ECDHE-ECDSA-AES256-SHA”
+- “ECDHE-RSA-AES256-SHA”
+- “AES256-GCM-SHA384”
+- “AES256-SHA”
+Contour recommends leaving this undefined unless you are sure you must.
+See: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#extensions-transport-sockets-tls-v3-tlsparameters
+Note: This list is a superset of what is valid for stock Envoy builds and those using BoringSSL FIPS.
+ |
+
+
+
+ExtensionProtocolVersion
+(string
alias)
+
+(Appears on:
+ExtensionServiceSpec)
+
+
+
ExtensionProtocolVersion is the version of the GRPC protocol used
+to access extension services. The only version currently supported
+is “v3”.
+
+
+
+
+Value |
+Description |
+
+
+"v2" |
+SupportProtocolVersion2 requests the “v2” support protocol version.
+Deprecated: this protocol version is no longer supported and the
+constant is retained for backwards compatibility only.
+ |
+
"v3" |
+SupportProtocolVersion3 requests the “v3” support protocol version.
+ |
+
+
+ExtensionServiceSpec
+
+
+(Appears on:
+ExtensionService)
+
+
+
ExtensionServiceSpec defines the desired state of an ExtensionService resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+services
+
+
+
+[]ExtensionServiceTarget
+
+
+ |
+
+ Services specifies the set of Kubernetes Service resources that
+receive GRPC extension API requests.
+If no weights are specified for any of the entries in
+this array, traffic will be spread evenly across all the
+services.
+Otherwise, traffic is balanced proportionally to the
+Weight field in each entry.
+ |
+
+
+
+validation
+
+
+
+UpstreamValidation
+
+
+ |
+
+(Optional)
+ UpstreamValidation defines how to verify the backend service’s certificate
+ |
+
+
+
+protocol
+
+
+string
+
+ |
+
+(Optional)
+ Protocol may be used to specify (or override) the protocol used to reach this Service.
+Values may be h2 or h2c. If omitted, protocol-selection falls back on Service annotations.
+ |
+
+
+
+loadBalancerPolicy
+
+
+
+LoadBalancerPolicy
+
+
+ |
+
+(Optional)
+ The policy for load balancing GRPC service requests. Note that the
+Cookie and RequestHash load balancing strategies cannot be used
+here.
+ |
+
+
+
+timeoutPolicy
+
+
+
+TimeoutPolicy
+
+
+ |
+
+(Optional)
+ The timeout policy for requests to the services.
+ |
+
+
+
+protocolVersion
+
+
+
+ExtensionProtocolVersion
+
+
+ |
+
+(Optional)
+ This field sets the version of the GRPC protocol that Envoy uses to
+send requests to the extension service. Since Contour always uses the
+v3 Envoy API, this is currently fixed at “v3”. However, other
+protocol options will be available in the future.
+ |
+
+
+
+ExtensionServiceStatus
+
+
+(Appears on:
+ExtensionService)
+
+
+
ExtensionServiceStatus defines the observed state of an
+ExtensionService resource.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+conditions
+
+
+
+[]DetailedCondition
+
+
+ |
+
+(Optional)
+ Conditions contains the current status of the ExtensionService resource.
+Contour will update a single condition, Valid , that is in normal-true polarity.
+Contour will not modify any other Conditions set in this block,
+in case some other controller wants to add a Condition.
+ |
+
+
+
+ExtensionServiceTarget
+
+
+(Appears on:
+ExtensionServiceSpec)
+
+
+
ExtensionServiceTarget defines an Kubernetes Service to target with
+extension service traffic.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ Name is the name of Kubernetes service that will accept service
+traffic.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+ Port (defined as Integer) to proxy traffic to since a service can have multiple defined.
+ |
+
+
+
+weight
+
+
+uint32
+
+ |
+
+(Optional)
+ Weight defines proportion of traffic to balance to the Kubernetes Service.
+ |
+
+
+
+FeatureFlags
+([]string
alias)
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
FeatureFlags defines the set of feature flags
+to toggle new contour features.
+
+GatewayConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
GatewayConfig holds the config for Gateway API controllers.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+gatewayRef
+
+
+
+NamespacedName
+
+
+ |
+
+ GatewayRef defines the specific Gateway that this Contour
+instance corresponds to.
+ |
+
+
+
+GlobalCircuitBreakerDefaults
+
+
+(Appears on:
+ClusterParameters)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+maxConnections
+
+
+uint32
+
+ |
+
+(Optional)
+ The maximum number of connections that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
+ |
+
+
+
+maxPendingRequests
+
+
+uint32
+
+ |
+
+(Optional)
+ The maximum number of pending requests that a single Envoy instance allows to the Kubernetes Service; defaults to 1024.
+ |
+
+
+
+maxRequests
+
+
+uint32
+
+ |
+
+(Optional)
+ The maximum parallel requests a single Envoy instance allows to the Kubernetes Service; defaults to 1024
+ |
+
+
+
+maxRetries
+
+
+uint32
+
+ |
+
+(Optional)
+ The maximum number of parallel retries a single Envoy instance allows to the Kubernetes Service; defaults to 3.
+ |
+
+
+
+HTTPProxyConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
HTTPProxyConfig defines parameters on HTTPProxy.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+disablePermitInsecure
+
+
+bool
+
+ |
+
+(Optional)
+ DisablePermitInsecure disables the use of the
+permitInsecure field in HTTPProxy.
+Contour’s default is false.
+ |
+
+
+
+rootNamespaces
+
+
+[]string
+
+ |
+
+(Optional)
+ Restrict Contour to searching these namespaces for root ingress routes.
+ |
+
+
+
+fallbackCertificate
+
+
+
+NamespacedName
+
+
+ |
+
+(Optional)
+ FallbackCertificate defines the namespace/name of the Kubernetes secret to
+use as fallback when a non-SNI request is received.
+ |
+
+
+
+HTTPVersionType
+(string
alias)
+
+(Appears on:
+EnvoyConfig)
+
+
+
HTTPVersionType is the name of a supported HTTP version.
+
+
+
+
+Value |
+Description |
+
+
+"HTTP/1.1" |
+HTTPVersion1 is the name of the HTTP/1.1 version.
+ |
+
"HTTP/2" |
+HTTPVersion2 is the name of the HTTP/2 version.
+ |
+
+
+
+
+(Appears on:
+PolicyConfig)
+
+
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+set
+
+
+map[string]string
+
+ |
+
+(Optional)
+ |
+
+
+
+remove
+
+
+[]string
+
+ |
+
+(Optional)
+ |
+
+
+
+HealthConfig
+
+
+(Appears on:
+ContourConfigurationSpec,
+EnvoyConfig)
+
+
+
HealthConfig defines the endpoints to enable health checks.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+address
+
+
+string
+
+ |
+
+(Optional)
+ Defines the health address interface.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+(Optional)
+ Defines the health port.
+ |
+
+
+
+IngressConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
IngressConfig defines ingress specific config items.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+classNames
+
+
+[]string
+
+ |
+
+(Optional)
+ Ingress Class Names Contour should use.
+ |
+
+
+
+statusAddress
+
+
+string
+
+ |
+
+(Optional)
+ Address to set in Ingress object status.
+ |
+
+
+
+LogLevel
+(string
alias)
+
+(Appears on:
+ContourSettings,
+EnvoySettings)
+
+
+
LogLevel is the logging levels available.
+
+
+
+
+Value |
+Description |
+
+
+"critical" |
+CriticalLog sets the log level for Envoy to critical .
+ |
+
"debug" |
+DebugLog sets the log level for Contour/Envoy to debug .
+ |
+
"error" |
+ErrorLog sets the log level for Envoy to error .
+ |
+
"info" |
+InfoLog sets the log level for Contour/Envoy to info .
+ |
+
"off" |
+OffLog disable logging for Envoy.
+ |
+
"trace" |
+TraceLog sets the log level for Envoy to trace .
+ |
+
"warn" |
+WarnLog sets the log level for Envoy to warn .
+ |
+
+
+MetricsConfig
+
+
+(Appears on:
+ContourConfigurationSpec,
+EnvoyConfig)
+
+
+
MetricsConfig defines the metrics endpoint.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+address
+
+
+string
+
+ |
+
+(Optional)
+ Defines the metrics address interface.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+(Optional)
+ Defines the metrics port.
+ |
+
+
+
+tls
+
+
+
+MetricsTLS
+
+
+ |
+
+(Optional)
+ TLS holds TLS file config details.
+Metrics and health endpoints cannot have same port number when metrics is served over HTTPS.
+ |
+
+
+
+MetricsTLS
+
+
+(Appears on:
+MetricsConfig)
+
+
+
TLS holds TLS file config details.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+caFile
+
+
+string
+
+ |
+
+(Optional)
+ CA filename.
+ |
+
+
+
+certFile
+
+
+string
+
+ |
+
+(Optional)
+ Client certificate filename.
+ |
+
+
+
+keyFile
+
+
+string
+
+ |
+
+(Optional)
+ Client key filename.
+ |
+
+
+
+NamespacedName
+
+
+(Appears on:
+EnvoyConfig,
+GatewayConfig,
+HTTPProxyConfig,
+RateLimitServiceConfig,
+TracingConfig)
+
+
+
NamespacedName defines the namespace/name of the Kubernetes resource referred from the config file.
+Used for Contour config YAML file parsing, otherwise we could use K8s types.NamespacedName.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+name
+
+
+string
+
+ |
+
+ |
+
+
+
+namespace
+
+
+string
+
+ |
+
+ |
+
+
+
+NetworkParameters
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
NetworkParameters hold various configurable network values.
+
+
+NetworkPublishing
+
+
+(Appears on:
+EnvoySettings)
+
+
+
NetworkPublishing defines the schema for publishing to a network.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+
+NetworkPublishingType
+
+
+ |
+
+(Optional)
+ NetworkPublishingType is the type of publishing strategy to use. Valid values are:
+
+In this configuration, network endpoints for Envoy use container networking.
+A Kubernetes LoadBalancer Service is created to publish Envoy network
+endpoints.
+See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer
+
+Publishes Envoy network endpoints using a Kubernetes NodePort Service.
+In this configuration, Envoy network endpoints use container networking. A Kubernetes
+NodePort Service is created to publish the network endpoints.
+See: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport
+NOTE:
+When provisioning an Envoy NodePortService , use Gateway Listeners’ port numbers to populate
+the Service’s node port values, there’s no way to auto-allocate them.
+See: https://github.com/projectcontour/contour/issues/4499
+
+Publishes Envoy network endpoints using a Kubernetes ClusterIP Service.
+In this configuration, Envoy network endpoints use container networking. A Kubernetes
+ClusterIP Service is created to publish the network endpoints.
+See: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+If unset, defaults to LoadBalancerService.
+ |
+
+
+
+externalTrafficPolicy
+
+
+
+Kubernetes core/v1.ServiceExternalTrafficPolicy
+
+
+ |
+
+(Optional)
+ ExternalTrafficPolicy describes how nodes distribute service traffic they
+receive on one of the Service’s “externally-facing” addresses (NodePorts, ExternalIPs,
+and LoadBalancer IPs).
+If unset, defaults to “Local”.
+ |
+
+
+
+ipFamilyPolicy
+
+
+
+Kubernetes core/v1.IPFamilyPolicy
+
+
+ |
+
+(Optional)
+ IPFamilyPolicy represents the dual-stack-ness requested or required by
+this Service. If there is no value provided, then this field will be set
+to SingleStack. Services can be “SingleStack” (a single IP family),
+“PreferDualStack” (two IP families on dual-stack configured clusters or
+a single IP family on single-stack clusters), or “RequireDualStack”
+(two IP families on dual-stack configured clusters, otherwise fail).
+ |
+
+
+
+serviceAnnotations
+
+
+map[string]string
+
+ |
+
+(Optional)
+ ServiceAnnotations is the annotations to add to
+the provisioned Envoy service.
+ |
+
+
+
+NetworkPublishingType
+(string
alias)
+
+(Appears on:
+NetworkPublishing)
+
+
+
NetworkPublishingType is a way to publish network endpoints.
+
+
+
+
+Value |
+Description |
+
+
+"ClusterIPService" |
+ClusterIPServicePublishingType publishes a network endpoint using a Kubernetes
+ClusterIP Service.
+ |
+
"LoadBalancerService" |
+LoadBalancerServicePublishingType publishes a network endpoint using a Kubernetes
+LoadBalancer Service.
+ |
+
"NodePortService" |
+NodePortServicePublishingType publishes a network endpoint using a Kubernetes
+NodePort Service.
+ |
+
+
+NodePlacement
+
+
+(Appears on:
+ContourSettings,
+EnvoySettings)
+
+
+
NodePlacement describes node scheduling configuration for pods.
+If nodeSelector and tolerations are specified, the scheduler will use both to
+determine where to place the pod(s).
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+nodeSelector
+
+
+map[string]string
+
+ |
+
+(Optional)
+ NodeSelector is the simplest recommended form of node selection constraint
+and specifies a map of key-value pairs. For the pod to be eligible
+to run on a node, the node must have each of the indicated key-value pairs
+as labels (it can have additional labels as well).
+If unset, the pod(s) will be scheduled to any available node.
+ |
+
+
+
+tolerations
+
+
+
+[]Kubernetes core/v1.Toleration
+
+
+ |
+
+(Optional)
+ Tolerations work with taints to ensure that pods are not scheduled
+onto inappropriate nodes. One or more taints are applied to a node; this
+marks that the node should not accept any pods that do not tolerate the
+taints.
+The default is an empty list.
+See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+for additional details.
+ |
+
+
+
+PolicyConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
PolicyConfig holds default policy used if not explicitly set by the user
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+requestHeaders
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ RequestHeadersPolicy defines the request headers set/removed on all routes
+ |
+
+
+
+responseHeaders
+
+
+
+HeadersPolicy
+
+
+ |
+
+(Optional)
+ ResponseHeadersPolicy defines the response headers set/removed on all routes
+ |
+
+
+
+applyToIngress
+
+
+bool
+
+ |
+
+(Optional)
+ ApplyToIngress determines if the Policies will apply to ingress objects
+Contour’s default is false.
+ |
+
+
+
+RateLimitServiceConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
RateLimitServiceConfig defines properties of a global Rate Limit Service.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+extensionService
+
+
+
+NamespacedName
+
+
+ |
+
+ ExtensionService identifies the extension service defining the RLS.
+ |
+
+
+
+domain
+
+
+string
+
+ |
+
+(Optional)
+ Domain is passed to the Rate Limit Service.
+ |
+
+
+
+failOpen
+
+
+bool
+
+ |
+
+(Optional)
+ FailOpen defines whether to allow requests to proceed when the
+Rate Limit Service fails to respond with a valid rate limit
+decision within the timeout defined on the extension service.
+ |
+
+
+
+enableXRateLimitHeaders
+
+
+bool
+
+ |
+
+(Optional)
+ EnableXRateLimitHeaders defines whether to include the X-RateLimit
+headers X-RateLimit-Limit, X-RateLimit-Remaining, and X-RateLimit-Reset
+(as defined by the IETF Internet-Draft linked below), on responses
+to clients when the Rate Limit Service is consulted for a request.
+ref. https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html
+ |
+
+
+
+enableResourceExhaustedCode
+
+
+bool
+
+ |
+
+(Optional)
+ EnableResourceExhaustedCode enables translating error code 429 to
+grpc code RESOURCE_EXHAUSTED. When disabled, it’s translated to UNAVAILABLE.
+ |
+
+
+
+defaultGlobalRateLimitPolicy
+
+
+
+GlobalRateLimitPolicy
+
+
+ |
+
+(Optional)
+ DefaultGlobalRateLimitPolicy allows setting a default global rate limit policy for every HTTPProxy.
+HTTPProxy can overwrite this configuration.
+ |
+
+
+
+
+
+(Appears on:
+EnvoyListenerConfig)
+
+
+
ServerHeaderTransformation defines the action to be applied to the Server header on the response path
+
+
+
+
+Value |
+Description |
+
+
+"append_if_absent" |
+If no Server header is present, set it to “envoy”.
+If a Server header is present, pass it through.
+ |
+
"overwrite" |
+Overwrite any Server header with “envoy”.
+This is the default value.
+ |
+
"pass_through" |
+Pass through the value of the Server header, and do not append a header
+if none is present.
+ |
+
+
+SocketOptions
+
+
+(Appears on:
+EnvoyListenerConfig)
+
+
+
SocketOptions defines configurable socket options for Envoy listeners.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+tos
+
+
+int32
+
+ |
+
+(Optional)
+ Defines the value for IPv4 TOS field (including 6 bit DSCP field) for IP packets originating from Envoy listeners.
+Single value is applied to all listeners.
+If listeners are bound to IPv6-only addresses, setting this option will cause an error.
+ |
+
+
+
+trafficClass
+
+
+int32
+
+ |
+
+(Optional)
+ Defines the value for IPv6 Traffic Class field (including 6 bit DSCP field) for IP packets originating from the Envoy listeners.
+Single value is applied to all listeners.
+If listeners are bound to IPv4-only addresses, setting this option will cause an error.
+ |
+
+
+
+TLS
+
+
+(Appears on:
+XDSServerConfig)
+
+
+
TLS holds TLS file config details.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+caFile
+
+
+string
+
+ |
+
+(Optional)
+ CA filename.
+ |
+
+
+
+certFile
+
+
+string
+
+ |
+
+(Optional)
+ Client certificate filename.
+ |
+
+
+
+keyFile
+
+
+string
+
+ |
+
+(Optional)
+ Client key filename.
+ |
+
+
+
+insecure
+
+
+bool
+
+ |
+
+(Optional)
+ Allow serving the xDS gRPC API without TLS.
+ |
+
+
+
+TimeoutParameters
+
+
+(Appears on:
+EnvoyConfig)
+
+
+
TimeoutParameters holds various configurable proxy timeout values.
+
+
+TracingConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
TracingConfig defines properties for exporting trace data to OpenTelemetry.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+includePodDetail
+
+
+bool
+
+ |
+
+(Optional)
+ IncludePodDetail defines a flag.
+If it is true, contour will add the pod name and namespace to the span of the trace.
+the default is true.
+Note: The Envoy pods MUST have the HOSTNAME and CONTOUR_NAMESPACE environment variables set for this to work properly.
+ |
+
+
+
+serviceName
+
+
+string
+
+ |
+
+ ServiceName defines the name for the service.
+contour’s default is contour.
+ |
+
+
+
+overallSampling
+
+
+string
+
+ |
+
+(Optional)
+ OverallSampling defines the sampling rate of trace data.
+contour’s default is 100.
+ |
+
+
+
+maxPathTagLength
+
+
+uint32
+
+ |
+
+(Optional)
+ MaxPathTagLength defines maximum length of the request path
+to extract and include in the HttpUrl tag.
+contour’s default is 256.
+ |
+
+
+
+customTags
+
+
+
+[]*github.com/projectcontour/contour/apis/projectcontour/v1alpha1.CustomTag
+
+
+ |
+
+(Optional)
+ CustomTags defines a list of custom tags with unique tag name.
+ |
+
+
+
+extensionService
+
+
+
+NamespacedName
+
+
+ |
+
+ ExtensionService identifies the extension service defining the otel-collector.
+ |
+
+
+
+WorkloadType
+(string
alias)
+
+(Appears on:
+EnvoySettings)
+
+
+
WorkloadType is the type of Kubernetes workload to use for a component.
+
+XDSServerConfig
+
+
+(Appears on:
+ContourConfigurationSpec)
+
+
+
XDSServerConfig holds the config for the Contour xDS server.
+
+
+
+
+Field |
+Description |
+
+
+
+
+
+type
+
+
+
+XDSServerType
+
+
+ |
+
+(Optional)
+ Defines the XDSServer to use for contour serve .
+Values: envoy (default), contour (deprecated) .
+Other values will produce an error.
+ |
+
+
+
+address
+
+
+string
+
+ |
+
+(Optional)
+ Defines the xDS gRPC API address which Contour will serve.
+Contour’s default is “0.0.0.0”.
+ |
+
+
+
+port
+
+
+int
+
+ |
+
+(Optional)
+ Defines the xDS gRPC API port which Contour will serve.
+Contour’s default is 8001.
+ |
+
+
+
+tls
+
+
+
+TLS
+
+
+ |
+
+(Optional)
+ TLS holds TLS file config details.
+Contour’s default is { caFile: “/certs/ca.crt”, certFile: “/certs/tls.cert”, keyFile: “/certs/tls.key”, insecure: false }.
+ |
+
+
+
+XDSServerType
+(string
alias)
+
+(Appears on:
+XDSServerConfig)
+
+
+
XDSServerType is the type of xDS server implementation.
+
+
+
+
+Value |
+Description |
+
+
+"contour" |
+Use Contour’s xDS server (deprecated).
+ |
+
"envoy" |
+Use the upstream go-control-plane -based xDS server.
+ |
+
+
+
+
+Generated with gen-crd-api-reference-docs
.
+
diff --git a/site/content/docs/1.29/config/api.md b/site/content/docs/1.29/config/api.md
new file mode 100644
index 00000000000..99809537d11
--- /dev/null
+++ b/site/content/docs/1.29/config/api.md
@@ -0,0 +1,3 @@
+# Contour API Reference
+
+{{% include-html api-reference.html %}}
diff --git a/site/content/docs/1.29/config/client-authorization.md b/site/content/docs/1.29/config/client-authorization.md
new file mode 100644
index 00000000000..4db2b932eff
--- /dev/null
+++ b/site/content/docs/1.29/config/client-authorization.md
@@ -0,0 +1,123 @@
+# Client Authorization
+
+Contour supports integrating external servers to authorize client requests.
+
+Envoy implements external authorization in the [ext_authz][1] filter.
+This filter intercepts client requests and holds them while it sends a check
+request to an external server.
+The filter uses the check result to either allow the request to proceed, or to
+deny or redirect the request.
+
+The diagram below shows the sequence of requests involved in the successful
+authorization of an HTTP request:
+
+
+
+
+
+The [external authorization][7] guide demonstrates how to deploy HTTP basic
+authentication using Contour and [contour-authserver](https://github.com/projectcontour/contour-authserver).
+
+## Extension Services
+
+The starting point for external authorization in Contour is the
+[ExtensionService][2] API.
+This API creates a cluster which Envoy can use to send requests to an external server.
+In principle, the Envoy cluster can be used for any purpose, but in this
+document we are concerned only with how to use it as an authorization service.
+
+An authorization service is a gRPC service that implements the Envoy [CheckRequest][3] protocol.
+Note that Contour requires the extension to implement the "v3" version of the protocol.
+Contour is compatible with any authorization server that implements this protocol.
+
+The primary field of interest in the `ExtensionService` CRD is the
+`.spec.services` field.
+This field lists the Kubernetes Services that will receive the check requests.
+The `.spec.services[].name` field contains the name of the Service, which must
+exist in the same namespace as the `ExtensionService` object.
+The `ExtensionService` object must exist in the same namespace as the
+Services it targets to ensure that both objects are under the same
+administrative control.
+
+### Load Balancing for Extension Services
+
+An `ExtensionService` can be configured to send traffic to multiple Kubernetes Services.
+In this case, requests are divided proportionally across the Services according
+to the weight in the `.spec.services[].weight` field.
+The service weight can be used to flexibly shift traffic between Services for
+reasons like implementing blue-green deployments.
+The `.spec.loadBalancerPolicy` field configures how Envoy will load balance
+requests to the endpoints within each Service.
+
+### TLS Validation for Extension Services
+
+Since authorizing a client request may involve passing sensitive credentials
+from an HTTP request to the authorization service, the connection to the
+authorization server should be as secure as possible.
+Contour defaults the `.spec.protocol` field to "h2", which configures
+Envoy to use HTTP/2 over TLS for the authorization service connection.
+
+The [.spec.validation][4] field configures how Envoy should verify the TLS
+identity of the authorization server.
+This is a critical protection against accidentally sending credentials to an
+imposter service and should be enabled for all production deployments.
+The `.spec.validation` field should specify the expected server name
+from the authorization server's TLS certificate, and the trusted CA bundle
+that can be used to validate the TLS chain of trust.
+
+## Authorizing Virtual Hosts
+
+The [.spec.virtualhost.authorization][5] field in the Contour `HTTPProxy`
+API connects a virtual host to an authorization server that is bound by an
+`ExtensionService` object.
+Each virtual host can use a different `ExtensionService`, but only one
+`ExtensionService` can be used by a single virtual host.
+Authorization servers can only be attached to `HTTPProxy` objects that have TLS
+termination enabled.
+
+### Migrating from Application Authorization
+
+When applications perform their own authorization, migrating to centralized
+authorization may need some planning.
+The `.spec.virtualhost.authorization.failOpen` field controls how client
+requests should be handled when the authorization server fails.
+During a migration process, this can be set to `true`, so that if the
+authorization server becomes unavailable, clients can gracefully fall back to
+the existing application authorization mechanism.
+
+### Scoping Authorization Policy Settings
+
+It is common for services to contain some HTTP request paths that require
+authorization and some that do not.
+The HTTPProxy [authorization policy][6] allows authorization to be
+disabled for both an entire virtual host and for specific routes.
+
+The initial authorization policy is set on the HTTPProxy virtual host
+in the `.spec.virtualhost.authorization.authPolicy` field.
+This configures whether authorization is enabled, and the default authorization policy context.
+If authorization is disabled on the virtual host, it is also disabled by
+default on all the routes for that virtual host that do not specify an authorization policy.
+However, a route can configure its own authorization policy (in the
+`.spec.routes[].authPolicy` field) that can configure whether authorization
+is enabled, irrespective of the virtual host setting.
+
+The authorization policy context is a way to configure a set of key/value
+pairs that will be sent to the authorization server with each check
+request.
+The keys and values that should be specified here depend on which authorization
+server has been configured.
+This facility is intended for configuring authorization-specific information, such as
+the basic authentication realm, or OIDC parameters.
+
+The initial context map can be set on the virtual host.
+This sets the context keys that will be sent on every check request.
+A route can overwrite the value for a context key by setting it in the
+context field of authorization policy for the route.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/ext_authz_filter
+[2]: api/#projectcontour.io/v1alpha1.ExtensionService
+[3]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto
+[4]: api/#projectcontour.io/v1.UpstreamValidation
+[5]: api/#projectcontour.io/v1.AuthorizationServer
+[6]: api/#projectcontour.io/v1.AuthorizationPolicy
+[7]: guides/external-authorization.md
diff --git a/site/content/docs/1.29/config/cookie-rewriting.md b/site/content/docs/1.29/config/cookie-rewriting.md
new file mode 100644
index 00000000000..480fc34125c
--- /dev/null
+++ b/site/content/docs/1.29/config/cookie-rewriting.md
@@ -0,0 +1,109 @@
+# Cookie Rewriting
+
+Contour now enables users to customize attributes on HTTP `Set-Cookie` response headers.
+Application specific cookies and cookies generated by Contour's ["cookie" load balancing strategy](https://projectcontour.io/docs/v1.19.0/config/request-routing/#session-affinity) can be rewritten either per HTTPProxy `Route` or `Service`.
+Users can choose to rewrite the `Path`, `Domain`, `Secure`, and `SameSite` attributes of the `Set-Cookie` header currently.
+These attributes may be things an application may not be able to accurately set, without prior knowledge of how the application is deployed.
+For example, if Contour is in use to rewrite the path or hostname of a request before it reaches an application backend, the application may not be able to accurately set the `Path` and `Domain` attributes in a `Set-Cookie` response header.
+This feature can be used to apply security settings to ensure browsers treat generated cookies appropriately.
+The `SameSite` and `Secure` attributes are currently not set by Envoy when it generates the `X-Contour-Session-Affinity` cookie, but with this feature, users can customize this cookie further.
+
+## Per-Route Cookie Rewriting
+
+In order to implement separate cookie rewriting policies per-route, we can configure an HTTPProxy as below:
+
+```yaml
+# cookie-rewrite-route.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: cookie-rewrite-route
+spec:
+ virtualhost:
+ fqdn: cookie-rewrite-route.com
+ routes:
+ - conditions:
+ - prefix: /admin
+ services:
+ - name: admin-app
+ port: 80
+ cookieRewritePolicies:
+ - name: X-Admin-Session
+ pathRewrite:
+ value: /admin
+ - conditions:
+ - prefix: /payments
+ services:
+ - name: payment-app
+ port: 80
+ cookieRewritePolicies:
+ - name: X-User-Session
+ pathRewrite:
+ value: /payments
+ sameSite: Lax
+ - name: X-User-Data
+ sameSite: Lax
+```
+
+This HTTPProxy allows us to rewrite the `Path` attribute of the `X-Admin-Session` cookie on the `/admin` route.
+In addition on the `/payments` route we rewrite the `Path` and `SameSite` attributes of the `X-User-Session` cookie and the `SameSite` attribute of the additional `X-User-Data` cookie.
+If the backing services `payment-app` and `admin-app` return the specified cookies in `Set-Cookie` response headers, they will be rewritten with the values specified above.
+
+## Per-Service Cookie Rewriting
+
+Similar to the above, if we have more than one `Service` configured per `Route` but want to customize cookies separately between them we can:
+
+```yaml
+# cookie-rewrite-service.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: cookie-rewrite-service
+spec:
+ virtualhost:
+ fqdn: cookie-rewrite-service.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: backend-1
+ port: 80
+ cookieRewritePolicies:
+ - name: X-User-Data-1
+ domainRewrite:
+ value: cookie-rewrite-service.com
+ - name: backend-2
+ port: 80
+ cookieRewritePolicies:
+ - name: X-User-Data-2
+ domainRewrite:
+ value: cookie-rewrite-service.com
+```
+
+## Rewriting Contour Session Affinity Cookie
+
+As mentioned above, users can use Contour's cookie load balancing strategy to enable session affinity.
+Envoy generates a pretty bare-bones cookie but Contour's cookie rewriting feature can be used to customize this cookie to add security attributes:
+
+```yaml
+# cookie-rewrite-session-affinity.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: cookie-rewrite-session-affinity
+spec:
+ virtualhost:
+ fqdn: cookie-rewrite-session-affinity.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: backend
+ port: 80
+ loadBalancerPolicy:
+ strategy: Cookie
+ cookieRewritePolicies:
+ - name: X-Contour-Session-Affinity
+ sameSite: Strict
+ secure: true
+```
diff --git a/site/content/docs/1.29/config/cors.md b/site/content/docs/1.29/config/cors.md
new file mode 100644
index 00000000000..8f468aeaec7
--- /dev/null
+++ b/site/content/docs/1.29/config/cors.md
@@ -0,0 +1,82 @@
+# CORS
+
+A CORS (Cross-origin resource sharing) policy can be set for an HTTPProxy in order to allow cross-domain requests for trusted sources.
+If a policy is set, it will be applied to all the routes of the virtual host.
+
+Contour allows configuring the headers involved in responses to cross-domain requests.
+These include the `Access-Control-Allow-Origin`, `Access-Control-Allow-Methods`, `Access-Control-Allow-Headers`, `Access-Control-Expose-Headers`, `Access-Control-Max-Age`, `Access-Control-Allow-Private-Network` and `Access-Control-Allow-Credentials` headers in responses.
+
+In this example, cross-domain requests will be allowed for any domain (note the `*` value), with the methods `GET`, `POST`, or `OPTIONS`.
+Headers `Authorization` and `Cache-Control` will be passed to the upstream server and headers `Content-Length` and `Content-Range` will be made available to the cross-origin request client.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: cors-example
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ corsPolicy:
+ allowCredentials: true
+ allowPrivateNetwork: true
+ allowOrigin:
+ - "*" # allows any origin
+ allowMethods:
+ - GET
+ - POST
+ - OPTIONS
+ allowHeaders:
+ - authorization
+ - cache-control
+ exposeHeaders:
+ - Content-Length
+ - Content-Range
+ maxAge: "10m" # preflight requests can be cached for 10 minutes.
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: cors-example
+ port: 80
+```
+
+The `allowOrigin` list may also be configured with exact origin matches or regex patterns.
+In the following example, cross-domain requests must originate from the domain `https://client.example.com` or domains that match the regex `http[s]?:\/\/some-site-[a-z0-9]+\.example\.com` (e.g. request with `Origin` header `https://some-site-abc456.example.com`)
+
+*Note:* Patterns for matching `Origin` headers must be valid regex, simple "globbing" patterns (e.g. `*.foo.com`) will not be accepted or may produce incorrect matches.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: cors-example
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ corsPolicy:
+ allowCredentials: true
+ allowOrigin:
+ - https://client.example.com
+ - http[s]?:\/\/some-site-[a-z0-9]+\.example\.com
+ allowMethods:
+ - GET
+ - POST
+ - OPTIONS
+ allowHeaders:
+ - authorization
+ - cache-control
+ exposeHeaders:
+ - Content-Length
+ - Content-Range
+ maxAge: "10m"
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: cors-example
+ port: 80
+```
+
+`MaxAge` durations are expressed in the Go [duration format](https://godoc.org/time#ParseDuration).
+Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Only positive values are allowed and 0 disables the cache requiring a preflight `OPTIONS` check for all cross-origin requests.
diff --git a/site/content/docs/1.29/config/external-service-routing.md b/site/content/docs/1.29/config/external-service-routing.md
new file mode 100644
index 00000000000..7da431dd06b
--- /dev/null
+++ b/site/content/docs/1.29/config/external-service-routing.md
@@ -0,0 +1,47 @@
+# External Service Routing
+
+HTTPProxy supports routing traffic to `ExternalName` service types, but this is disabled by default, as it can lead
+to inadvertent exposure of the Envoy Admin UI, allowing remote shutdown and restart of Envoy.
+Please see [this security advisory](https://github.com/projectcontour/contour/security/advisories/GHSA-5ph6-qq5x-7jwc) for all the details.
+It can also be used to expose services in namespaces a user does not have access to, using an ExternalName of `service.namespace.svc.cluster.local`.
+Please see [this Kubernetes security advisory](https://github.com/kubernetes/kubernetes/issues/103675) for more details.
+
+We do *not* recommend enabling ExternalName Services without a strong use case, and understanding of the security implications.
+
+However, to enable ExternalName processing, you must set the `enableExternalNameService` configuration file setting to `true`.
+This will allow the following configuration to be valid.
+
+## ExternalName Support
+
+Contour looks at the `spec.externalName` field of the service and configures the route to use that DNS name instead of utilizing EDS.
+
+Note that hostnames of `localhost` or some other synonyms will be rejected (because of the aforementioned security issues).
+
+There's nothing specific in the HTTPProxy object that needs to be configured other than referencing a service of type `ExternalName`.
+HTTPProxy supports the `requestHeadersPolicy` field to rewrite the `Host` header after first handling a request and before proxying to an upstream service.
+This field can be used to ensure that the forwarded HTTP request contains the hostname that the external resource is expecting.
+
+_**Note:** The ports are required to be specified._
+
+```yaml
+# httpproxy-externalname.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ run: externaldns
+ name: externaldns
+ namespace: default
+spec:
+ externalName: foo-basic.bar.com
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ type: ExternalName
+```
+
+To proxy to another resource outside the cluster (e.g. a hosted object store bucket), configure that external resource in a service of type `externalName`.
+Then define a `requestHeadersPolicy` which replaces the `Host` header with the value of the external name service defined previously.
+Finally, if the upstream service is served over TLS, set the `protocol` field on the service to `tls` or annotate the external name service with: `projectcontour.io/upstream-protocol.tls: 443,https`, assuming your service had a port 443 and name `https`.
diff --git a/site/content/docs/1.29/config/fundamentals.md b/site/content/docs/1.29/config/fundamentals.md
new file mode 100644
index 00000000000..0bdac65f77f
--- /dev/null
+++ b/site/content/docs/1.29/config/fundamentals.md
@@ -0,0 +1,197 @@
+# HTTPProxy Fundamentals
+
+The [Ingress][1] object was added to Kubernetes in version 1.1 to describe properties of a cluster-wide reverse HTTP proxy.
+Since that time, the Ingress API has remained relatively unchanged, and the need to express implementation-specific capabilities has inspired an [explosion of annotations][2].
+
+The goal of the HTTPProxy Custom Resource Definition (CRD) is to expand upon the functionality of the Ingress API to allow for a richer user experience, as well as addressing the limitations of the latter's use in multi-tenant environments.
+
+## Key HTTPProxy Benefits
+
+- Safely supports multi-team Kubernetes clusters, with the ability to limit which Namespaces may configure virtual hosts and TLS credentials.
+- Enables including of routing configuration for a path or domain from another HTTPProxy, possibly in another Namespace.
+- Accepts multiple services within a single route and load balances traffic across them.
+- Natively allows defining service weighting and load balancing strategy without annotations.
+- Validation of HTTPProxy objects at creation time and status reporting for post-creation validity.
+
+## Ingress to HTTPProxy
+
+A minimal Ingress object might look like:
+
+```yaml
+# ingress.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: basic
+spec:
+ rules:
+ - host: foo-basic.bar.com
+ http:
+ paths:
+ - backend:
+ service:
+ name: s1
+ port:
+ number: 80
+ pathType: Prefix
+```
+
+This Ingress object, named `basic`, will route incoming HTTP traffic with a `Host:` header for `foo-basic.bar.com` to a Service named `s1` on port `80`.
+Implementing similar behavior using an HTTPProxy looks like this:
+
+```yaml
+# httpproxy.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: basic
+spec:
+ virtualhost:
+ fqdn: foo-basic.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+```
+
+**Lines 1-5**: As with all other Kubernetes objects, an HTTPProxy needs apiVersion, kind, and metadata fields.
+
+**Lines 7-8**: The presence of the `virtualhost` field indicates that this is a root HTTPProxy that is the top level entry point for this domain.
+
+
+## Interacting with HTTPProxies
+
+As with all Kubernetes objects, you can use `kubectl` to create, list, describe, edit, and delete HTTPProxy CRDs.
+
+Creating an HTTPProxy:
+
+```bash
+$ kubectl create -f basic.httpproxy.yaml
+httpproxy "basic" created
+```
+
+Listing HTTPProxies:
+
+```bash
+$ kubectl get httpproxy
+NAME AGE
+basic 24s
+```
+
+Describing HTTPProxy:
+
+```bash
+$ kubectl describe httpproxy basic
+Name: basic
+Namespace: default
+Labels:
+API Version: projectcontour.io/v1
+Kind: HTTPProxy
+Metadata:
+ Cluster Name:
+ Creation Timestamp: 2019-07-05T19:26:54Z
+ Resource Version: 19373717
+ Self Link: /apis/projectcontour.io/v1/namespaces/default/httpproxy/basic
+ UID: 6036a9d7-8089-11e8-ab00-f80f4182762e
+Spec:
+ Routes:
+ Conditions:
+ Prefix: /
+ Services:
+ Name: s1
+ Port: 80
+ Virtualhost:
+ Fqdn: foo-basic.bar.com
+Events:
+```
+
+Deleting HTTPProxies:
+
+```bash
+$ kubectl delete httpproxy basic
+httpproxy "basic" deleted
+```
+
+## Status Reporting
+
+There are many misconfigurations that could cause an HTTPProxy or delegation to be invalid.
+Contour will make its best effort to process even partially valid configuration and allow traffic to be served for the valid parts.
+To aid users in resolving any issues, Contour updates a `status` field in all HTTPProxy objects.
+
+If an HTTPProxy object is valid, it will have a status property that looks like this:
+
+```yaml
+status:
+ currentStatus: valid
+ description: valid HTTPProxy
+```
+
+If the HTTPProxy is invalid, the `currentStatus` field will be `invalid` and the `description` field will provide a description of the issue.
+
+As an example, if an HTTPProxy object has specified a negative value for weighting, the HTTPProxy status will be:
+
+```yaml
+status:
+ currentStatus: invalid
+ description: "route '/foo': service 'home': weight must be greater than or equal to zero"
+```
+
+Some examples of invalid configurations that Contour provides statuses for:
+
+- Negative weight provided in the route definition.
+- Invalid port number provided for service.
+- Prefix in parent does not match route in delegated route.
+- Root HTTPProxy created in a namespace other than the allowed root namespaces.
+- A given Route of an HTTPProxy both delegates to another HTTPProxy and has a list of services.
+- Orphaned route.
+- Delegation chain produces a cycle.
+- Root HTTPProxy does not specify fqdn.
+- Multiple prefixes cannot be specified on the same set of route conditions.
+- Multiple header conditions of type "exact match" with the same header key.
+- Contradictory header conditions on a route, e.g. a "contains" and "notcontains" condition for the same header and value.
+
+Invalid configuration is ignored and will not be used in the ingress routing configuration.
+Envoy will respond with an error when an HTTP request is received on a route with invalid configuration in the following cases:
+
+* `502 Bad Gateway` response is sent when HTTPProxy has an include that refers to an HTTPProxy that does not exist.
+* `503 Service Unavailable` response is sent when HTTPProxy refers to a service that does not exist.
+
+### Example
+
+The following example has two routes: the first one is valid, while the second one refers to a service that does not exist.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-routes-with-a-missing-service
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: valid-service
+ port: 80
+ - conditions:
+ - prefix: /subpage
+ services:
+ - name: service-that-does-not-exist
+ port: 80
+```
+
+The `HTTPProxy` will have condition `Valid=false` with detailed error message: `Spec.Routes unresolved service reference: service "default/service-that-does-not-exist" not found`.
+Requests received for `http://www.example.com/` will be forwarded to `valid-service` but requests received for `http://www.example.com/subpage` will result in error `503 Service Unavailable` response from Envoy.
+
+## HTTPProxy API Specification
+
+The full HTTPProxy specification is described in detail in the [API documentation][4].
+There are a number of working examples of HTTPProxy objects in the [`examples/example-workload`][3] directory of the Contour Github repository.
+
+ [1]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ [2]: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md
+ [3]: {{< param github_url>}}/tree/{{< param branch >}}/examples/example-workload/httpproxy
+ [4]: api.md
diff --git a/site/content/docs/1.29/config/gateway-api.md b/site/content/docs/1.29/config/gateway-api.md
new file mode 100644
index 00000000000..605103dc7e3
--- /dev/null
+++ b/site/content/docs/1.29/config/gateway-api.md
@@ -0,0 +1,217 @@
+# Gateway API
+
+## Introduction
+
+[Gateway API][1] is an open source project managed by the SIG Network community.
+It is a collection of resources that model service networking in Kubernetes.
+These resources - GatewayClass, Gateway, HTTPRoute, TCPRoute, Service, etc - aim to evolve Kubernetes service networking through expressive, extensible, and role-oriented interfaces that are implemented by many vendors and have broad industry support.
+
+Contour implements Gateway API in addition to supporting HTTPProxy and Ingress.
+In particular, Contour aims to support all [core and extended features][2] in Gateway API.
+
+Gateway API has a comprehensive [website and docs][1], so this document focuses primarily on unique aspects of Contour's Gateway API implementation, rather than attempting to reproduce all of the content available on the Gateway API website.
+The reader is suggested to familiarize themselves with the basics of Gateway API before continuing with this doc.
+
+In Contour's Gateway API implementation, a Gateway corresponds 1:1 with a single deployment of Contour + Envoy.
+In other words, each Gateway has its own control plane (Contour) and data plane (Envoy).
+
+The remainder of this document delves into more detail regarding configuration options when using Contour with Gateway API.
+If you are looking for a way to get started with Gateway API and Contour, see the [Gateway API guide][12], a step-by-step tutorial on getting Contour installed with Gateway API and using it to route traffic to a service.
+
+## Enabling Gateway API in Contour
+
+There are two ways to deploy Contour with Gateway API support: **static** provisioning and **dynamic** provisioning.
+
+In **static** provisioning, the platform operator defines a `Gateway` resource, and then manually deploys a Contour instance corresponding to that `Gateway` resource.
+It is up to the platform operator to ensure that all configuration matches between the `Gateway` and the Contour/Envoy resources.
+Contour will then process that `Gateway` and its routes.
+
+In **dynamic** provisioning, the platform operator first deploys Contour's Gateway provisioner. Then, the platform operator defines a `Gateway` resource, and the provisioner automatically deploys a Contour instance that corresponds to the `Gateway's` configuration and will process that `Gateway` and its routes.
+
+Static provisioning makes sense for users who:
+- prefer the traditional model of deploying Contour
+- have only a single Gateway
+- want to use just the standard listener ports (80/443)
+- have highly customized YAML for deploying Contour.
+
+Dynamic provisioning makes sense for users who:
+- have many Gateways
+- want to use additional listener ports
+- prefer a simple declarative API for provisioning Contour instances
+- want a fully conformant Gateway API implementation
+
+### Static Provisioning
+
+To statically provision Contour with Gateway API enabled:
+
+1. Install the [Gateway API experimental channel][3].
+1. Create a GatewayClass, with a controller name of `projectcontour.io/gateway-controller`.
+1. Create a Gateway using the above GatewayClass.
+1. In the Contour config file, add a reference to the above Gateway via `gateway.gatewayRef` (see https://projectcontour.io/docs/1.25/configuration/#gateway-configuration)
+1. Install Contour using the above config file.
+
+Contour provides an example manifest for this at https://projectcontour.io/quickstart/contour-gateway.yaml.
+
+### Dynamic Provisioning
+
+To dynamically provision Contour with Gateway API enabled:
+
+1. Install the [Contour Gateway Provisioner][9], which includes the Gateway API experimental channel.
+1. Create a GatewayClass, with a controller name of `projectcontour.io/gateway-controller`.
+1. Create a Gateway using the above GatewayClass.
+
+The Contour Gateway Provisioner will deploy an instance of Contour in the Gateway's namespace implementing the Gateway spec.
+
+**Note:** Gateway names must be 63 characters or shorter, to avoid issues when generating dependent resources. See [projectcontour/contour#5970][13] and [kubernetes-sigs/gateway-api#2592][14] for more information.
+
+## Gateway Listeners
+
+Each unique Gateway Listener port requires the Envoy service to expose that port, and to map it to an underlying port in the Envoy daemonset/deployment that Envoy is configured to listen on.
+For example, the following Gateway Listener configuration (abridged) requires service ports of 80 and 443, mapped to underlying container ports 8080 and 8443:
+
+```yaml
+listeners:
+- name: http
+ protocol: HTTP
+ port: 80
+- name: https
+ protocol: HTTPS
+ port: 443
+```
+
+In dynamic provisioning, the Contour Gateway Provisioner will continuously ensure that the Envoy service and daemonset/deployment are kept in sync with the Gateway Listener configuration.
+In static provisioning, it is up to the platform operator to keep the Envoy resources in sync with the Gateway Listeners.
+
+To get from the Gateway Listener port to the port that Envoy will be configured to listen on, i.e. the container port:
+- add 8000 to the Listener port number
+- if the result is greater than 65535, subtract 65535
+- if the result is less than or equal to 1023, add 1023.
+
+Note that, in rare corner cases, it's possible to have port conflicts.
+Check the Gateway status to ensure that Listeners have been properly provisioned.
+
+## Routing
+
+Gateway API defines multiple route types.
+Each route type is appropriate for a different type of traffic being proxied to a backend service.
+Contour implements `HTTPRoute`, `TLSRoute`, `GRPCRoute` and `TCPRoute`.
+The details of each of these route types are covered in extensive detail on the Gateway API website; the [route resources overview][11] is a good place to start learning about them.
+
+### Routing with HTTPProxy or Ingress
+
+When Gateway API is enabled in Contour, it's still possible to use HTTPProxy or Ingress to define routes, with some limitations.
+This is useful for users who:
+- are in the process of migrating to Gateway API
+- want to use the Contour Gateway Provisioner for dynamic provisioning, but need the advanced features of HTTPProxy
+
+To use HTTPProxy or Ingress with Gateway API, define a Gateway with the following Listeners:
+
+```yaml
+listeners:
+- name: http
+ protocol: HTTP
+ port: 80
+ allowedRoutes:
+ namespaces:
+ from: All
+- name: https
+ protocol: projectcontour.io/https
+ port: 443
+ allowedRoutes:
+ namespaces:
+ from: All
+```
+
+Note that for the second Listener, a Contour-specific protocol is used, and no TLS details are specified.
+Instead, TLS details continue to be configured on the HTTPProxy or Ingress resource.
+
+This is an area of active development and further work will be done in upcoming releases to better support migrations and mixed modes of operation.
+
+## Contour Gateway Provisioner
+
+### Customizing a GatewayClass
+
+Gateway API [supports attaching parameters to a GatewayClass][5], which can customize the Gateways that are provisioned for that GatewayClass.
+
+Contour defines a CRD called `ContourDeployment`, which can be used as `GatewayClass` parameters.
+
+A simple example of a parameterized Contour GatewayClass that provisions Envoy as a Deployment instead of the default DaemonSet looks like:
+
+```yaml
+kind: GatewayClass
+apiVersion: gateway.networking.k8s.io/v1
+metadata:
+ name: contour-with-envoy-deployment
+spec:
+ controllerName: projectcontour.io/gateway-controller
+ parametersRef:
+ kind: ContourDeployment
+ group: projectcontour.io
+ name: contour-with-envoy-deployment-params
+ namespace: projectcontour
+---
+kind: ContourDeployment
+apiVersion: projectcontour.io/v1alpha1
+metadata:
+ namespace: projectcontour
+ name: contour-with-envoy-deployment-params
+spec:
+ envoy:
+ workloadType: Deployment
+```
+
+All Gateways provisioned using the `contour-with-envoy-deployment` GatewayClass would get an Envoy Deployment.
+
+See [the API documentation][6] for all `ContourDeployment` options.
+
+It's important to note that, per the [GatewayClass spec][10]:
+
+> It is recommended that [GatewayClass] be used as a template for Gateways.
+> This means that a Gateway is based on the state of the GatewayClass at the time it was created and changes to the GatewayClass or associated parameters are not propagated down to existing Gateways.
+> This recommendation is intended to limit the blast radius of changes to GatewayClass or associated parameters.
+> If implementations choose to propagate GatewayClass changes to existing Gateways, that MUST be clearly documented by the implementation.
+
+Contour follows the recommended behavior, meaning changes to a GatewayClass and its parameters are not propagated down to existing Gateways.
+
+### Upgrades
+
+When the Contour Gateway Provisioner is upgraded to a new version, it will upgrade all Gateways it controls (both the control plane and the data plane).
+
+## Disabling Experimental Resources
+
+Some users may want to use Contour with the [Gateway API standard channel][4] instead of the experimental channel, to avoid installing alpha resources into their clusters.
+To do this, Contour must be told to disable informers for the experimental resources.
+In the Contour (control plane) deployment, use the `--disable-feature` flag for `contour serve` to disable informers for the experimental resources:
+
+```yaml
+containers:
+- name: contour
+ image: ghcr.io/projectcontour/contour:
+ command: ["contour"]
+ args:
+ - serve
+ - --incluster
+ - --xds-address=0.0.0.0
+ - --xds-port=8001
+ - --contour-cafile=/certs/ca.crt
+ - --contour-cert-file=/certs/tls.crt
+ - --contour-key-file=/certs/tls.key
+ - --config-path=/config/contour.yaml
+ - --disable-feature=tlsroutes
+ - --disable-feature=grpcroutes
+```
+
+[1]: https://gateway-api.sigs.k8s.io/
+[2]: https://gateway-api.sigs.k8s.io/concepts/conformance/#2-support-levels
+[3]: https://gateway-api.sigs.k8s.io/guides/#install-experimental-channel
+[4]: https://gateway-api.sigs.k8s.io/guides/#install-standard-channel
+[5]: https://gateway-api.sigs.k8s.io/api-types/gatewayclass/#gatewayclass-parameters
+[6]: https://projectcontour.io/docs/main/config/api/#projectcontour.io/v1alpha1.ContourDeployment
+[7]: https://projectcontour.io/docs/main/config/api/#projectcontour.io/v1alpha1.GatewayConfig
+[8]: https://gateway-api.sigs.k8s.io/api-types/gatewayclass/#gatewayclass-controller-selection
+[9]: https://projectcontour.io/quickstart/contour-gateway-provisioner.yaml
+[10]: https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.GatewayClass
+[11]: https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-resources
+[12]: /docs/{{< param version >}}/guides/gateway-api
+[13]: https://github.com/projectcontour/contour/issues/5970
+[14]: https://github.com/kubernetes-sigs/gateway-api/issues/2592
\ No newline at end of file
diff --git a/site/content/docs/1.29/config/health-checks.md b/site/content/docs/1.29/config/health-checks.md
new file mode 100644
index 00000000000..6dd1aac619d
--- /dev/null
+++ b/site/content/docs/1.29/config/health-checks.md
@@ -0,0 +1,160 @@
+# Upstream Health Checks
+
+## HTTP Proxy Health Checking
+
+Active health checking can be configured on a per route basis.
+Contour supports HTTP health checking and can be configured with various settings to tune the behavior.
+
+During HTTP health checking Envoy will send an HTTP request to the upstream Endpoints.
+It expects a 200 response by default if the host is healthy (see `expectedStatuses` below for configuring the "healthy" status codes).
+The upstream host can return 503 if it wants to immediately notify Envoy to no longer forward traffic to it.
+It is important to note that these are health checks which Envoy implements and are separate from any other system such as those that exist in Kubernetes.
+
+```yaml
+# httpproxy-health-checks.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: health-check
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: health.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ healthCheckPolicy:
+ path: /healthy
+ intervalSeconds: 5
+ timeoutSeconds: 2
+ unhealthyThresholdCount: 3
+ healthyThresholdCount: 5
+ services:
+ - name: s1-health
+ port: 80
+ - name: s2-health
+ port: 80
+```
+
+Health check configuration parameters:
+
+- `path`: HTTP endpoint used to perform health checks on upstream service (e.g. `/healthz`). It expects a 200 response if the host is healthy. The upstream host can return 503 if it wants to immediately notify downstream hosts to no longer forward traffic to it.
+- `host`: The value of the host header in the HTTP health check request. If left empty (default value), the name "contour-envoy-healthcheck" will be used.
+- `intervalSeconds`: The interval (seconds) between health checks. Defaults to 5 seconds if not set.
+- `timeoutSeconds`: The time to wait (seconds) for a health check response. If the timeout is reached the health check attempt will be considered a failure. Defaults to 2 seconds if not set.
+- `unhealthyThresholdCount`: The number of unhealthy health checks required before a host is marked unhealthy. Note that for http health checking if a host responds with 503 this threshold is ignored and the host is considered unhealthy immediately. Defaults to 3 if not defined.
+- `healthyThresholdCount`: The number of healthy health checks required before a host is marked healthy. Note that during startup, only a single successful health check is required to mark a host healthy.
+- `expectedStatuses`: An optional list of HTTP status ranges that are considered healthy. Ranges follow half-open semantics, meaning the start is inclusive and the end is exclusive. Statuses must be between 100 (inclusive) and 600 (exclusive).
+
+### Non-default expected statuses
+
+By default, only responses with a 200 status code will be considered healthy.
+The set of response codes considered healthy can be customized by specifying ranges in `expectedStatuses`.
+Ranges follow half-open semantics, meaning the start is inclusive and the end is exclusive.
+Statuses must be between 100 (inclusive) and 600 (exclusive).
+For example:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: health-check
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: health.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ healthCheckPolicy:
+ path: /healthy
+ intervalSeconds: 5
+ timeoutSeconds: 2
+ unhealthyThresholdCount: 3
+ healthyThresholdCount: 5
+ # Status codes 200 and 250-299 will be considered healthy.
+ expectedStatuses:
+ - start: 200
+ end: 201
+ - start: 250
+ end: 300
+ services:
+ - name: s1-health
+ port: 80
+ - name: s2-health
+ port: 80
+```
+
+Note that if `expectedStatuses` is specified, `200` must be explicitly included in one of the specified ranges if it is desired as a healthy status code.
+
+## TCP Proxy Health Checking
+
+Contour also supports TCP health checking and can be configured with various settings to tune the behavior.
+
+During TCP health checking Envoy will send a connect-only health check to the upstream Endpoints.
+It is important to note that these are health checks which Envoy implements and are separate from any
+other system such as those that exist in Kubernetes.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: tcp-health-check
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: health.bar.com
+ tcpproxy:
+ healthCheckPolicy:
+ intervalSeconds: 5
+ timeoutSeconds: 2
+ unhealthyThresholdCount: 3
+ healthyThresholdCount: 5
+ services:
+ - name: s1-health
+ port: 80
+ - name: s2-health
+ port: 80
+```
+
+TCP Health check policy configuration parameters:
+
+- `intervalSeconds`: The interval (seconds) between health checks. Defaults to 5 seconds if not set.
+- `timeoutSeconds`: The time to wait (seconds) for a health check response. If the timeout is reached the health check attempt will be considered a failure. Defaults to 2 seconds if not set.
+- `unhealthyThresholdCount`: The number of unhealthy health checks required before a host is marked unhealthy. Note that for http health checking if a host responds with 503 this threshold is ignored and the host is considered unhealthy immediately. Defaults to 3 if not defined.
+- `healthyThresholdCount`: The number of healthy health checks required before a host is marked healthy. Note that during startup, only a single successful health check is required to mark a host healthy.
+
+## Specify the service health check port
+
+Contour supports configuring an optional health check port for services.
+
+By default, the service's health check port is the same as the service's routing port.
+If the service's health check port and routing port are different, you can configure the health check port separately.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: health-check
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: health.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ healthCheckPolicy:
+ path: /healthy
+ intervalSeconds: 5
+ timeoutSeconds: 2
+ unhealthyThresholdCount: 3
+ healthyThresholdCount: 5
+ services:
+ - name: s1-health
+ port: 80
+ healthPort: 8998
+ - name: s2-health
+ port: 80
+```
+
+In this example, Envoy will send a health check request to port `8998` of the `s1-health` service and port `80` of the `s2-health` service respectively. If the host is healthy, Envoy will forward traffic to the `s1-health` service on port `80` and to the `s2-health` service on port `80`.
diff --git a/site/content/docs/1.29/config/inclusion-delegation.md b/site/content/docs/1.29/config/inclusion-delegation.md
new file mode 100644
index 00000000000..b9364ff1fcd
--- /dev/null
+++ b/site/content/docs/1.29/config/inclusion-delegation.md
@@ -0,0 +1,139 @@
+# HTTPProxy Inclusion
+
+HTTPProxy permits the splitting of a system's configuration into separate HTTPProxy instances using **inclusion**.
+
+Inclusion, as the name implies, allows for one HTTPProxy object to be included in another, optionally with some conditions inherited from the parent.
+Contour reads the inclusion tree and merges the included routes into one big object internally before rendering Envoy config.
+Importantly, the included HTTPProxy objects do not have to be in the same namespace.
+
+Each tree of HTTPProxy starts with a root, the top level object of the configuration for a particular virtual host.
+Each root HTTPProxy defines a `virtualhost` key, which describes properties such as the fully qualified name of the virtual host, TLS configuration, etc.
+
+HTTPProxies included from the root must not contain a virtualhost key.
+Root objects cannot include other roots either transitively or directly.
+This permits the owner of an HTTPProxy root to allow the inclusion of a portion of the route space inside a virtual host, and to allow that route space to be further subdivided with inclusions.
+Because the path is not necessarily used as the only key, the route space can be multi-dimensional.
+
+## Conditions and Inclusion
+
+Like Routes, Inclusion may specify a set of [conditions][1].
+These conditions are added to any conditions on the routes included.
+This process is recursive.
+
+Conditions are sets of individual condition statements, for example `prefix: /blog` is the condition that the matching request's path must start with `/blog`.
+When conditions are combined through inclusion Contour merges the conditions inherited via inclusion with any conditions specified on the route.
+This may result in duplicates, for example two `prefix:` conditions, mix of both `prefix:` and `exact` or `prefix` and `regex` conditions, or two header match conditions with the same name and value.
+To resolve this Contour applies the following logic.
+
+- `prefix:` conditions are concatenated together in the order they were applied from the root object. For example the conditions, `prefix: /api`, `prefix: /v1` becomes a single `prefix: /api/v1` conditions. Note: Multiple prefixes cannot be supplied on a single set of Route conditions.
+- `exact:` conditions are also concatenated just like `prefix:` conditions, but `exact:` conditions are not allowed in include match conditions. If the child httpproxy has `exact:` condition then after concatenation, it becomes a single `exact:` condition. For example, `prefix: /static` and `exact: /main.js` become a single `exact: /static/main.js` condition.
+- `regex:` conditions are also concatenated just like `prefix:` conditions, but `regex:` conditions are not allowed in include match conditions. If the child httpproxy has `regex:` condition then after concatenation, it becomes a single `regex:` condition. For example, `prefix: /static` and `regex: /.*/main.js` become a single `regex: /static/.*/main.js` condition.
+- Proxies with repeated identical `header:` conditions of type "exact match" (the same header keys exactly) are marked as "Invalid" since they create an un-routable configuration.
+
+## Configuring Inclusion
+
+Inclusion is a top-level field in the HTTPProxy [spec][2] element.
+It requires one field, `name`, and has two optional fields:
+
+- `namespace`. This will assume the included HTTPProxy is in the same namespace if it's not specified.
+- a `conditions` block.
+
+## Inclusion Within the Same Namespace
+
+HTTPProxies can include other HTTPProxy objects in the namespace by specifying the name of the object and its namespace in the top-level `includes` block.
+Note that `includes` is a list, and so it must use the YAML list construct.
+
+In this example, the HTTPProxy `include-root` has included the configuration for paths matching `/service2` from the HTTPProxy named `service2` in the same namespace as `include-root` (the `default` namespace).
+It's important to note that `service2` HTTPProxy has not defined a `virtualhost` property as it is NOT a root HTTPProxy.
+
+```yaml
+# httpproxy-inclusion-samenamespace.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: include-root
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: root.bar.com
+ includes:
+ # Includes the /service2 path from service2 in the same namespace
+ - name: service2
+ namespace: default
+ conditions:
+ - prefix: /service2
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: service2
+ namespace: default
+spec:
+ routes:
+ - services: # matches /service2
+ - name: s2
+ port: 80
+ - conditions:
+ - prefix: /blog # matches /service2/blog
+ services:
+ - name: blog
+ port: 80
+```
+
+## Inclusion Across Namespaces
+
+Inclusion can also happen across Namespaces by specifying a `namespace` in the `includes` entry.
+This is a particularly powerful paradigm for enabling multi-team Ingress management.
+
+If the `--watch-namespaces` configuration flag is used, it must define all namespaces that will be referenced by the inclusion.
+
+In this example, the root HTTPProxy has included configuration for paths matching `/blog` to the `blog` HTTPProxy object in the `marketing` namespace.
+
+```yaml
+# httpproxy-inclusion-across-namespaces.yaml
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: namespace-include-root
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: ns-root.bar.com
+ includes:
+ # delegate the subpath, `/blog` to the HTTPProxy object in the marketing namespace with the name `blog`
+ - name: blog
+ namespace: marketing
+ conditions:
+ - prefix: /blog
+ routes:
+ - services:
+ - name: s1
+ port: 80
+
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: blog
+ namespace: marketing
+spec:
+ routes:
+ - services:
+ - name: s2
+ port: 80
+```
+
+## Orphaned HTTPProxy children
+
+It is possible for HTTPProxy objects to exist that have not been delegated to by another HTTPProxy.
+These objects are considered "orphaned" and will be ignored by Contour in determining ingress configuration.
+
+[1]: request-routing#conditions
+[2]: api/#projectcontour.io/v1.HTTPProxySpec
diff --git a/site/content/docs/1.29/config/ingress.md b/site/content/docs/1.29/config/ingress.md
new file mode 100644
index 00000000000..22e65bb0255
--- /dev/null
+++ b/site/content/docs/1.29/config/ingress.md
@@ -0,0 +1,94 @@
+# k8s Ingress Resource Support in Contour
+
+
+
+
+
+This document describes Contour's implementation of specific Ingress resource fields and features.
+As the Ingress specification has evolved between v1beta1 and v1, any differences between versions are highlighted to ensure clarity for Contour users.
+
+**Note: As of Contour version 1.16.0, Contour is not compatible with Kubernetes versions that predate Ingress v1. This means Contour 1.16.0 and above require Kubernetes 1.19 and above. The Ingress v1beta1 resource is still available in Kubernetes 1.19 (but will be removed in 1.22) and the API server will convert such resources to Ingress v1 for Contour to subscribe to.**
+
+## Kubernetes Versions
+
+Contour is [validated against Kubernetes release versions N through N-2][1] (with N being the latest release).
+For Kubernetes version 1.19+, the API server translates any Ingress v1beta1 resources to Ingress v1 and Contour watches Ingress v1 resources.
+
+## IngressClass and IngressClass Name
+
+In order to support differentiating between Ingress controllers or multiple instances of a single Ingress controller, users can create an [IngressClass resource][2] and specify an IngressClass name on an Ingress to reference it.
+The IngressClass resource can be used to provide configuration to an Ingress controller watching resources it governs.
+Contour supports watching an IngressClass resource specified with the `--ingress-class-name` flag to the `contour serve` command.
+Contour does not require an IngressClass resource with the name passed in the aforementioned flag to exist, the name can just be used as an identifier for filtering which Ingress resources Contour reconciles into actual route configuration.
+
+Ingresses may specify an IngressClass name via the original annotation method or via the `ingressClassName` spec field.
+As the `ingressClassName` field has been introduced on Ingress v1beta1, there should be no differences in IngressClass name filtering between the two available versions of the resource.
+Contour uses its configured IngressClass name to filter Ingresses.
+If the `--ingress-class-name` flag is provided, Contour will only accept Ingress resources that exactly match the specified IngressClass name via annotation or spec field, with the value in the annotation taking precedence. (The `--ingress-class-name` value can be a comma-separated list of class names to match against.)
+If the flag is not passed to `contour serve` Contour will accept any Ingress resource that specifies the IngressClass name `contour` in annotation or spec fields or does not specify one at all.
+
+## Default Backend
+
+Contour supports the `defaultBackend` Ingress v1 spec field and equivalent `backend` v1beta1 version of the field.
+See upstream [documentation][3] on this field.
+Any requests that do not match an Ingress rule will be forwarded to this backend.
+As TLS secrets on Ingresses are scoped to specific hosts, this default backend cannot serve TLS as it could match an unbounded set of hosts and configuring a matching set of TLS secrets would not be possible.
+As is the case on Ingress rules, Contour only supports configuring a Service as a backend and does not support any other Kubernetes resource.
+
+## Ingress Rules
+
+See upstream [documentation][4] on Ingress rules.
+
+As with default backends, Contour only supports configuring a Service as a backend and does not support any other Kubernetes resource.
+
+Contour supports [wildcard hostnames][5] as documented by the upstream API as well as precise hostnames.
+Wildcard hostnames are limited to the whole first DNS label of the hostname, e.g. `*.foo.com` is valid but `*foo.com`, `foo*.com`, `foo.*.com` are not.
+`*` is also not a valid hostname.
+Precise hostnames in Ingress or HTTPProxy configuration take higher precedence over wildcards.
+For example, given an Ingress rule with the hostname `*.foo.com` routing to `service-a` and another Ingress rule or HTTPProxy route containing a subdomain (say `bar.foo.com`) routing to `service-b`, requests to `bar.foo.com` will be routed to `service-b`.
+The Ingress admission controller validation ensures valid hostnames are present when creating an Ingress resource.
+
+Contour supports all of the various [path matching][6] types described by the Ingress spec.
+Prior to Contour 1.14.0, path match types were ignored and path matching was performed with a Contour specific implementation.
+Paths specified with any regex meta-characters (any of `^+*[]%`) were implemented as regex matches.
+Any other paths were programmed in Envoy as "string prefix" matches.
+This behavior is preserved in the `ImplementationSpecific` match type in Contour 1.14.0+ to ensure backwards compatibility.
+`Exact` path matches will now result in matching requests to the given path exactly.
+The `Prefix` path match type will now result in matching requests with a "segment prefix" rather than a "string prefix" according to the spec (e.g. the prefix `/foo/bar` will match requests with paths `/foo/bar`, `/foo/bar/`, and `/foo/bar/baz`, but not `/foo/barbaz`).
+
+## TLS
+
+See upstream [documentation][7] on TLS configuration.
+
+A secret specified in an Ingress TLS element will only be applied to Ingress rules with `Host` configuration that exactly matches an element of the TLS `Hosts` field.
+Any secrets that do not match an Ingress rule `Host` will be ignored.
+
+In Ingress v1beta1, the `secretName` field could contain a string with a full `namespace/name` identifier.
+When used with Contour's [TLS certificate delegation][8], this allowed Ingresses to use a TLS certificate from a different namespace.
+However, Ingress v1 does not allow the `secretName` field to contain a string with a full `namespace/name` identifier, because the field validation disallows the `/` character.
+Instead, Ingress v1 resources can now use the `projectcontour.io/tls-cert-namespace` annotation, to define the namespace that contains the TLS certificate (if different than the Ingress's namespace).
+This enables the TLS certificate delegation functionality to continue working for Ingress v1.
+For more information and an example, see the [TLS certificate delegation documentation][8].
+
+## Status
+
+In order to inform users of the address at which their Ingress resources can be accessed, Contour sets status on Ingress resources.
+If `contour serve` is run with the `--ingress-status-address` flag, Contour will use the provided value to set the Ingress status address accordingly.
+If not provided, Contour will use the address of the Envoy service using the passed in `--envoy-service-name` and `--envoy-service-namespace` flags.
+
+## Header Manipulation
+
+The Ingress resource does not allow adding or removing HTTP headers on requests or responses.
+However, Contour does allow users to set a global HTTP header [policy configuration][9] which can be optionally applied to configuration generated from Ingress resources.
+Contour enables this behavior with the `applyToIngress` boolean field (set to `true` to enable).
+
+[0]: https://github.com/kubernetes-sigs/ingress-controller-conformance
+[1]: /resources/compatibility-matrix/
+[2]: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class
+[3]: https://kubernetes.io/docs/concepts/services-networking/ingress/#default-backend
+[4]: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
+[5]: https://kubernetes.io/docs/concepts/services-networking/ingress/#hostname-wildcards
+[6]: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types
+[7]: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
+[8]: /docs/{{< param version >}}/config/tls-delegation/
+[9]: /docs/{{< param version >}}/configuration/#policy-configuration
diff --git a/site/content/docs/1.29/config/ip-filtering.md b/site/content/docs/1.29/config/ip-filtering.md
new file mode 100644
index 00000000000..161d39bc228
--- /dev/null
+++ b/site/content/docs/1.29/config/ip-filtering.md
@@ -0,0 +1,80 @@
+# IP Filtering
+
+Contour supports filtering requests based on the incoming ip address using Envoy's [RBAC Filter][1].
+
+Requests can be either allowed or denied based on a CIDR range specified on the virtual host and/or individual routes.
+
+If the request's IP address is allowed, the request will be proxied to the appropriate upstream.
+If the request's IP address is denied, an HTTP 403 (Forbidden) will be returned to the client.
+
+## Specifying Rules
+
+Rules are specified with the `ipAllowPolicy` and `ipDenyPolicy` fields on `virtualhost` and `route`:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: basic
+spec:
+ virtualhost:
+ fqdn: foo-basic.bar.com
+ ipAllowPolicy:
+ # traffic is allowed if it came from localhost (i.e. co-located reverse proxy)
+ - cidr: 127.0.0.1/32
+ source: Peer
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+ # route-level ip filters override the virtualhost-level filters
+ ipAllowPolicy:
+ # traffic is allowed if it came from localhost (i.e. co-located reverse proxy)
+ - cidr: 127.0.0.1/32
+ source: Peer
+ # and the request originated from an IP in this range
+ - cidr: 99.99.0.0/16
+ source: Remote
+```
+
+### Specifying CIDR Ranges
+
+CIDR ranges may be IPv4 or IPv6. Bare IP addresses are interpreted as the CIDR range containing that one IP address only.
+
+Examples:
+- `1.1.1.1/24`
+- `127.0.0.1`
+- `2001:db8::68/24`
+- `2001:db8::68`
+
+### Allow vs Deny
+
+Filters are specified as either allow or deny:
+
+- `ipAllowPolicy` only allows requests that match the ip filters.
+- `ipDenyPolicy` denies all requests unless they match the ip filters.
+
+Allow and deny policies cannot both be specified at the same time for a virtual host or route.
+
+### IP Source
+
+The `source` field controls how the ip address is selected from the request for filtering.
+
+- `source: Peer` filter rules will filter using Envoy's [direct_remote_ip][2], which is always the physical peer.
+- `source: Remote` filter rules will filter using Envoy's [remote_ip][3], which may be inferred from the X-Forwarded-For header or proxy protocol.
+
+If using `source: Remote` with `X-Forwarded-For`, it may be necessary to configure Contour's `numTrustedHops` in [Network Parameters][4].
+
+### Virtual Host and Route Filter Precedence
+
+IP filters on the virtual host apply to all routes included in the virtual host, unless the route specifies its own rules.
+
+Rules specified on a route override any rules defined on the virtual host, they are not additive.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/rbac_filter.html
+[2]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#envoy-v3-api-field-config-rbac-v3-principal-direct-remote-ip
+[3]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#envoy-v3-api-field-config-rbac-v3-principal-remote-ip
+[4]: api/#projectcontour.io/v1.NetworkParameters
+
diff --git a/site/content/docs/1.29/config/jwt-verification.md b/site/content/docs/1.29/config/jwt-verification.md
new file mode 100644
index 00000000000..3f884ad2aef
--- /dev/null
+++ b/site/content/docs/1.29/config/jwt-verification.md
@@ -0,0 +1,182 @@
+# JWT Verification
+
+Contour supports verifying JSON Web Tokens (JWTs) on incoming requests, using Envoy's [jwt_authn HTTP filter][1].
+Specifically, the following properties can be checked:
+- issuer field
+- audiences field
+- signature, using a configured JSON Web Key Store (JWKS)
+- time restrictions (e.g. expiration, not before time)
+
+If verification succeeds, the request will be proxied to the appropriate upstream.
+If verification fails, an HTTP 401 (Unauthorized) will be returned to the client.
+
+JWT verification is only supported on TLS-terminating virtual hosts.
+
+## Configuring providers and rules
+
+A JWT provider is configured for an HTTPProxy's virtual host, and defines how to verify JWTs:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: jwt-verification
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: example.com
+ tls:
+ secretName: example-com-tls-cert
+ jwtProviders:
+ - name: provider-1
+ issuer: example.com
+ audiences:
+ - audience-1
+ - audience-2
+ remoteJWKS:
+ uri: https://example.com/jwks.json
+ timeout: 1s
+ cacheDuration: 5m
+ forwardJWT: true
+ routes:
+ ...
+```
+
+The provider above requires JWTs to have an issuer of example.com, an audience of either audience-1 or audience-2, and a signature that can be verified using the configured JWKS.
+It also forwards the JWT to the backend via the `Authorization` header after successful verification.
+
+To apply a JWT provider as a requirement to a given route, specify a `jwtVerificationPolicy` for the route:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: jwt-verification
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: example.com
+ tls:
+ secretName: example-com-tls-cert
+ jwtProviders:
+ - name: provider-1
+ ...
+ routes:
+ - conditions:
+ - prefix: /
+ jwtVerificationPolicy:
+ require: provider-1
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /css
+ services:
+ - name: s1
+ port: 80
+```
+
+In the above example, the default route requires requests to carry JWTs that can be verified using provider-1.
+The second route _excludes_ requests to paths starting with `/css` from JWT verification, because it does not have a JWT verification policy.
+
+### Configuring TLS validation for the JWKS server
+
+By default, the JWKS server's TLS certificate will not be validated, but validation can be requested by setting the `spec.virtualhost.jwtProviders[].remoteJWKS.validation` field.
+This field has mandatory `caSecret` and `subjectName` fields, which specify the trusted root certificates with which to validate the server certificate and the expected server name.
+The `caSecret` can be a namespaced name of the form `<namespace>/<name>`.
+If the CA secret's namespace is not the same namespace as the `HTTPProxy` resource, [TLS Certificate Delegation][5] must be used to allow the owner of the CA certificate secret to delegate, for the purposes of referencing the CA certificate in a different namespace, permission to Contour to read the Secret object from another namespace.
+
+**Note:** If `spec.virtualhost.jwtProviders[].remoteJWKS.validation` is present, `spec.virtualhost.jwtProviders[].remoteJWKS.uri` must have a scheme of `https`.
+
+## Setting a default provider
+
+The previous section showed how to explicitly require JWT providers for specific routes.
+An alternate approach is to define a JWT provider as the default by specifying `default: true` for it, in which case it is automatically applied to all routes unless they disable JWT verification.
+The example from the previous section could alternately be configured as follows:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: jwt-verification
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: example.com
+ tls:
+ secretName: example-com-tls-cert
+ jwtProviders:
+ - name: provider-1
+ default: true
+ ...
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /css
+ jwtVerificationPolicy:
+ disabled: true
+ services:
+ - name: s1
+ port: 80
+```
+
+In this case, the default route automatically has provider-1 applied, while the `/css` route explicitly disables JWT verification.
+
+One scenario where setting a default provider can be particularly useful is when using [HTTPProxy inclusion][2].
+Setting a default provider in the root HTTPProxy allows all routes in the child HTTPProxies to automatically have JWT verification applied.
+For example:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: jwt-verification-root
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: example.com
+ tls:
+ secretName: example-com-tls-cert
+ jwtProviders:
+ - name: provider-1
+ default: true
+ ...
+ includes:
+ - name: jwt-verification-child
+ namespace: default
+ conditions:
+ - prefix: /blog
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: jwt-verification-child
+ namespace: default
+spec:
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+```
+
+In this case, all routes in the child HTTPProxy will automatically have JWT verification applied, without the owner of this HTTPProxy needing to configure it explicitly.
+
+## API documentation
+
+For more information on the HTTPProxy API for JWT verification, see:
+
+- [JWTProvider][3]
+- [JWTVerificationPolicy][4]
+
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/jwt_authn_filter
+[2]: /docs/{{< param version >}}/config/inclusion-delegation/
+[3]: /docs/{{< param version >}}/config/api/#projectcontour.io/v1.JWTProvider
+[4]: /docs/{{< param version >}}/config/api/#projectcontour.io/v1.JWTVerificationPolicy
+[5]: tls-delegation.md
diff --git a/site/content/docs/1.29/config/overload-manager.md b/site/content/docs/1.29/config/overload-manager.md
new file mode 100644
index 00000000000..33c96532eba
--- /dev/null
+++ b/site/content/docs/1.29/config/overload-manager.md
@@ -0,0 +1,30 @@
+# Overload Manager
+
+Envoy uses heap memory when processing requests.
+When the system runs out of memory or the memory resource limit for the container is reached, the Envoy process is terminated abruptly.
+To avoid this, Envoy [overload manager][1] can be enabled.
+Overload manager controls how much memory Envoy will allocate at maximum and what actions it takes when the limit is reached.
+
+Overload manager is disabled by default.
+It can be enabled at deployment time by using `--overload-max-heap=[MAX_BYTES]` command line flag in [`contour bootstrap`][2] command.
+The bootstrap command is executed in [init container of Envoy pod][3] to generate initial configuration for Envoy.
+To enable overload manager, modify the deployment manifest and add for example `--overload-max-heap=2147483648` to set maximum heap size to 2 GiB.
+The appropriate number of bytes can be different from system to system.
+
+After the feature is enabled, following two overload actions are configured to Envoy:
+
+* Shrink heap action is executed when 95% of the maximum heap size is reached.
+* Envoy will stop accepting requests when 98% of the maximum heap size is reached.
+
+When requests are denied due to high memory pressure, `503 Service Unavailable` will be returned with a response body containing text `envoy overloaded`.
+Shrink heap action will try to free unused heap memory, eventually allowing requests to be processed again.
+
+**NOTE:**
+The side effect of overload is that Envoy will also deny requests to the `/ready` and `/stats` endpoints.
+This is due to the way Contour secures Envoy's admin API and exposes only selected admin API endpoints by proxying itself.
+When readiness probe fails, the overloaded Envoy will be removed from the list of service endpoints.
+If the maximum heap size is set too low, Envoy may be unable to free enough memory and never become ready again.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/overload_manager/overload_manager
+[2]: ../configuration#bootstrap-flags
+[3]: https://github.com/projectcontour/contour/blob/cbec8eca9e8b639318588c5aa7ec0b5b751938c5/examples/render/contour.yaml#L5204-L5216
diff --git a/site/content/docs/1.29/config/rate-limiting.md b/site/content/docs/1.29/config/rate-limiting.md
new file mode 100644
index 00000000000..7a69c22c079
--- /dev/null
+++ b/site/content/docs/1.29/config/rate-limiting.md
@@ -0,0 +1,366 @@
+# Rate Limiting
+
+- [Overview](#overview)
+- [Local Rate Limiting](#local-rate-limiting)
+- [Global Rate Limiting](#global-rate-limiting)
+
+## Overview
+
+Rate limiting is a means of protecting backend services against unwanted traffic.
+This can be useful for a variety of different scenarios:
+
+- Protecting against denial-of-service (DoS) attacks by malicious actors
+- Protecting against DoS incidents due to bugs in client applications/services
+- Enforcing usage quotas for different classes of clients, e.g. free vs. paid tiers
+- Controlling resource consumption/cost
+
+Envoy supports two forms of HTTP rate limiting: **local** and **global**.
+
+In local rate limiting, rate limits are enforced by each Envoy instance, without any communication with other Envoys or any external service.
+
+In global rate limiting, an external rate limit service (RLS) is queried by each Envoy via gRPC for rate limit decisions.
+
+Contour supports both forms of Envoy's rate limiting.
+
+## Local Rate Limiting
+
+The `HTTPProxy` API supports defining local rate limit policies that can be applied to either individual routes or entire virtual hosts.
+Local rate limit policies define a maximum number of requests per unit of time that an Envoy should proxy to the upstream service.
+Requests beyond the defined limit will receive a `429 (Too Many Requests)` response by default.
+Local rate limit policies program Envoy's [HTTP local rate limit filter][1].
+
+It's important to note that local rate limit policies apply *per Envoy pod*.
+For example, a local rate limit policy of 100 requests per second for a given route will result in *each Envoy pod* allowing up to 100 requests per second for that route.
+
+### Defining a local rate limit
+
+Local rate limit policies can be defined for either routes or virtual hosts. A local rate limit policy requires a `requests` and a `unit` field, defining the *number of requests per unit of time* that are allowed. `requests` must be a positive integer, and `unit` can be `second`, `minute`, or `hour`. Optionally, a `burst` parameter can also be provided, defining the number of requests above the baseline rate that are allowed in a short period of time. This would allow occasional larger bursts of traffic not to be rate limited.
+
+Local rate limiting for the virtual host:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: ratelimited-vhost
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ rateLimitPolicy:
+ local:
+ requests: 100
+ unit: hour
+ burst: 20
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /s2
+ services:
+ - name: s2
+ port: 80
+```
+
+Local rate limiting for the route:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: ratelimited-route
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ rateLimitPolicy:
+ local:
+ requests: 20
+ unit: minute
+ - conditions:
+ - prefix: /s2
+ services:
+ - name: s2
+ port: 80
+```
+
+### Customizing the response
+
+#### Response code
+
+By default, Envoy returns a `429 (Too Many Requests)` when a request is rate limited.
+A non-default response code can optionally be configured as part of the local rate limit policy, in the `responseStatusCode` field.
+The value must be in the 400-599 range.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: custom-ratelimit-response
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ rateLimitPolicy:
+ local:
+ requests: 20
+ unit: minute
+ responseStatusCode: 503 # Service Unavailable
+```
+
+#### Headers
+
+Headers can optionally be added to rate limited responses, by configuring the `responseHeadersToAdd` field.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: custom-ratelimit-response
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ rateLimitPolicy:
+ local:
+ requests: 20
+ unit: minute
+ responseHeadersToAdd:
+ - name: x-contour-ratelimited
+ value: "true"
+```
+
+## Global Rate Limiting
+
+The `HTTPProxy` API also supports defining global rate limit policies on routes and virtual hosts.
+
+In order to use global rate limiting, you must first select and deploy an external rate limit service (RLS).
+There is an [Envoy rate limit service implementation][2], but any service that implements the [RateLimitService gRPC interface][3] is supported.
+
+### Configuring an external RLS with Contour
+
+Once you have deployed your RLS, you must configure it with Contour.
+
+Define an extension service for it (substituting values as appropriate):
+```yaml
+apiVersion: projectcontour.io/v1alpha1
+kind: ExtensionService
+metadata:
+ namespace: projectcontour
+ name: ratelimit
+spec:
+ protocol: h2
+ services:
+ - name: ratelimit
+ port: 8081
+```
+
+Now add a reference to it in the Contour config file:
+```yaml
+rateLimitService:
+ # The namespace/name of the extension service.
+ extensionService: projectcontour/ratelimit
+ # The domain value to pass to the RLS for all rate limit
+ # requests. Acts as a container for a set of rate limit
+ # definitions within the RLS.
+ domain: contour
+ # Whether to allow requests to proceed when the rate limit
+ # service fails to respond with a valid rate limit decision
+ # within the timeout defined on the extension service.
+ failOpen: true
+```
+
+### Defining a global rate limit policy
+
+Global rate limit policies can be defined for either routes or virtual hosts. Unlike local rate limit policies, global rate limit policies do not directly define a rate limit. Instead, they define a set of request descriptors that will be generated and sent to the external RLS for each request. The external RLS then makes the rate limit decision based on the descriptors and returns a response to Envoy.
+
+A global rate limit policy for the virtual host:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: ratelimited-vhost
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ rateLimitPolicy:
+ global:
+ descriptors:
+ # the first descriptor has a single key-value pair:
+ # [ remote_address= ].
+ - entries:
+ - remoteAddress: {}
+ # the second descriptor has two key-value pairs:
+ # [ remote_address=, vhost=local.projectcontour.io ].
+ - entries:
+ - remoteAddress: {}
+ - genericKey:
+ key: vhost
+ value: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /s2
+ services:
+ - name: s2
+ port: 80
+```
+
+A global rate limit policy for the route:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: ratelimited-route
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /s1
+ services:
+ - name: s1
+ port: 80
+ rateLimitPolicy:
+ global:
+ descriptors:
+ # the first descriptor has a single key-value pair:
+ # [ remote_address= ].
+ - entries:
+ - remoteAddress: {}
+ # the second descriptor has two key-value pairs:
+ # [ remote_address=, prefix=/s1 ].
+ - entries:
+ - remoteAddress: {}
+ - genericKey:
+ key: prefix
+ value: /s1
+ - conditions:
+ - prefix: /s2
+ services:
+ - name: s2
+ port: 80
+```
+
+#### Descriptors & descriptor entries
+
+A descriptor is a list of key-value pairs, i.e. entries, that are generated for a request. The entries can be generated based on different criteria. If any entry in a descriptor cannot generate a key-value pair for a given request, then the entire descriptor is not generated (see the [Envoy documentation][8] for more information). When a global rate limit policy defines multiple descriptors, then *all* descriptors that can be generated will be generated and sent to the rate limit service for consideration.
+
+Below are the supported types of descriptor entries.
+
+##### GenericKey
+
+A `GenericKey` descriptor entry defines a static key-value pair. For example:
+
+```yaml
+rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - genericKey:
+ key: virtual-host-name
+ value: foo.bar.com
+```
+
+Produces a descriptor entry of `virtual-host-name=foo.bar.com`.
+
+The `key` field is optional and defaults to a value of `generic_key` if not specified.
+
+See the [Envoy documentation][4] for more information and examples.
+
+##### RemoteAddress
+
+A `RemoteAddress` descriptor entry has a key of `remote_address` and a value of the client IP address (using the trusted address from `x-forwarded-for`). For example:
+
+```yaml
+rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - remoteAddress: {}
+```
+
+Produces a descriptor entry of `remote_address=`.
+
+See the [Envoy documentation][5] for more information and examples.
+
+##### RequestHeader
+
+A `RequestHeader` descriptor entry has a static key and a value equal to the value of a specified header on the client request. If the header is not present, the descriptor entry is not generated. For example:
+
+```yaml
+rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - requestHeader:
+ headerName: My-Header
+ descriptorKey: my-header-value
+```
+
+Produces a descriptor entry of `my-header-value=`, for a client request that has the `My-Header` header.
+
+See the [Envoy documentation][6] for more information and examples.
+
+##### RequestHeaderValueMatch
+
+A `RequestHeaderValueMatch` descriptor entry has a key of `header_match` and a static value. The entry is only generated if the client request's headers match a specified set of criteria. For example:
+
+```yaml
+rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - requestHeaderValueMatch:
+ headers:
+ - name: My-Header
+ notpresent: true
+ - name: My-Other-Header
+ contains: contour
+ expectMatch: true
+ value: foo
+```
+
+Produces a descriptor entry of `header_match=foo`, for a client request that does not have the `My-Header` header, and does have the `My-Other-Header` header, with a value containing the substring "contour".
+
+Contour supports `present`, `notpresent`, `contains`, `notcontains`, `exact`, and `notexact` header match operators.
+
+The `expectMatch` field defaults to true if not specified. If true, the client request's headers must positively match the specified criteria in order for the descriptor entry to be generated. If false, the client request's header must *not* match the specified criteria in order for the descriptor entry to be generated.
+
+See the [Envoy documentation][7] for more information and examples.
+
+
+
+[1]: https://www.envoyproxy.io/docs/envoy/v1.17.0/configuration/http/http_filters/local_rate_limit_filter#config-http-filters-local-rate-limit
+[2]: https://github.com/envoyproxy/ratelimit
+[3]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto
+[4]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-msg-config-route-v3-ratelimit-action-generickey
+[5]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-ratelimit-action-remoteaddress
+[6]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-ratelimit-action-requestheaders
+[7]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#config-route-v3-ratelimit-action-headervaluematch
+[8]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/rate_limit_filter#composing-actions
diff --git a/site/content/docs/1.29/config/request-rewriting.md b/site/content/docs/1.29/config/request-rewriting.md
new file mode 100644
index 00000000000..88fa3cc2508
--- /dev/null
+++ b/site/content/docs/1.29/config/request-rewriting.md
@@ -0,0 +1,337 @@
+# Request Rewriting
+
+## Path Rewriting
+
+HTTPProxy supports rewriting the HTTP request URL path prior to delivering the request to the backend service.
+Rewriting is performed after a routing decision has been made, and never changes the request destination.
+
+The `pathRewritePolicy` field specifies how the path prefix should be rewritten.
+The `replacePrefix` rewrite policy specifies a replacement string for a HTTP request path prefix match.
+When this field is present, the path prefix that the request matched is replaced by the text specified in the `replacement` field.
+If the HTTP request path is longer than the matched prefix, the remainder of the path is unchanged.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: rewrite-example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: rewrite.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ pathRewritePolicy:
+ replacePrefix:
+ - replacement: /new/prefix
+```
+
+The `replacePrefix` field accepts an array of possible replacements.
+When more than one `replacePrefix` array element is present, the `prefix` field can be used to disambiguate which replacement to apply.
+
+If no `prefix` field is present, the replacement is applied to all prefix matches made against the route.
+If a `prefix` field is present, the replacement is applied only to routes that have an exactly matching prefix condition.
+Specifying more than one `replacePrefix` entry is mainly useful when a HTTPProxy document is included into multiple parent documents.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: rewrite-example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: rewrite.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ conditions:
+ - prefix: /v1/api
+ pathRewritePolicy:
+ replacePrefix:
+ - prefix: /v1/api
+ replacement: /app/api/v1
+ - prefix: /
+ replacement: /app
+```
+
+## Header Rewriting
+
+HTTPProxy supports rewriting HTTP request and response headers.
+The `Set` operation sets a HTTP header value, creating it if it doesn't already exist or overwriting it if it does.
+The `Remove` operation removes a HTTP header.
+The `requestHeadersPolicy` field is used to rewrite headers on a HTTP request, and the `responseHeadersPolicy` is used to rewrite headers on a HTTP response.
+These fields can be specified on a route or on a specific service, depending on the rewrite granularity you need.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: header-rewrite-example
+spec:
+ virtualhost:
+ fqdn: header.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ requestHeadersPolicy:
+ set:
+ - name: Host
+ value: external.dev
+ remove:
+ - Some-Header
+ - Some-Other-Header
+```
+
+Manipulating headers is also supported per-Service or per-Route. Headers can be set or
+removed from the request or response as follows:
+
+per-Service:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+  name: header-manipulation
+  namespace: default
+spec:
+  virtualhost:
+    fqdn: headers.bar.com
+  routes:
+  - services:
+    - name: s1
+      port: 80
+      requestHeadersPolicy:
+        set:
+        - name: X-Foo
+          value: bar
+        remove:
+        - X-Baz
+      responseHeadersPolicy:
+        set:
+        - name: X-Service-Name
+          value: s1
+        remove:
+        - X-Internal-Secret
+```
+
+per-Route:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: header-manipulation
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: headers.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ requestHeadersPolicy:
+ set:
+ - name: X-Foo
+ value: bar
+ remove:
+ - X-Baz
+ responseHeadersPolicy:
+ set:
+ - name: X-Service-Name
+ value: s1
+ remove:
+ - X-Internal-Secret
+```
+
+In these examples we are setting the header `X-Foo` with value `bar` on requests
+and stripping `X-Baz`. We are then setting `X-Service-Name` on the response with
+value `s1`, and removing `X-Internal-Secret`.
+
+### Dynamic Header Values
+
+It is sometimes useful to set a header value using a dynamic value such as the
+hostname where the Envoy Pod is running (`%HOSTNAME%`) or the subject of the
+TLS client certificate (`%DOWNSTREAM_PEER_SUBJECT%`) or based on another header
+(`%REQ(header)%`).
+
+Examples:
+```
+ requestHeadersPolicy:
+ set:
+ - name: X-Envoy-Hostname
+ value: "%HOSTNAME%"
+ - name: X-Host-Protocol
+ value: "%REQ(Host)% - %PROTOCOL%"
+ responseHeadersPolicy:
+ set:
+ - name: X-Envoy-Response-Flags
+ value: "%RESPONSE_FLAGS%"
+```
+
+Contour supports most of the custom request/response header variables offered
+by Envoy - see the Envoy
+documentation for details of what each of these resolve to:
+
+* `%DOWNSTREAM_REMOTE_ADDRESS%`
+* `%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%`
+* `%DOWNSTREAM_LOCAL_ADDRESS%`
+* `%DOWNSTREAM_LOCAL_ADDRESS_WITHOUT_PORT%`
+* `%DOWNSTREAM_LOCAL_PORT%`
+* `%DOWNSTREAM_LOCAL_URI_SAN%`
+* `%DOWNSTREAM_PEER_URI_SAN%`
+* `%DOWNSTREAM_LOCAL_SUBJECT%`
+* `%DOWNSTREAM_PEER_SUBJECT%`
+* `%DOWNSTREAM_PEER_ISSUER%`
+* `%DOWNSTREAM_TLS_SESSION_ID%`
+* `%DOWNSTREAM_TLS_CIPHER%`
+* `%DOWNSTREAM_TLS_VERSION%`
+* `%DOWNSTREAM_PEER_FINGERPRINT_256%`
+* `%DOWNSTREAM_PEER_FINGERPRINT_1%`
+* `%DOWNSTREAM_PEER_SERIAL%`
+* `%DOWNSTREAM_PEER_CERT%`
+* `%DOWNSTREAM_PEER_CERT_V_START%`
+* `%DOWNSTREAM_PEER_CERT_V_END%`
+* `%HOSTNAME%`
+* `%REQ(header-name)%`
+* `%PROTOCOL%`
+* `%RESPONSE_FLAGS%`
+* `%RESPONSE_CODE_DETAILS%`
+* `%UPSTREAM_REMOTE_ADDRESS%`
+
+Note that Envoy passes variables that can't be expanded through unchanged or
+skips them entirely - for example:
+* `%UPSTREAM_REMOTE_ADDRESS%` as a request header remains as
+ `%UPSTREAM_REMOTE_ADDRESS%` because as noted in the Envoy docs: "The upstream
+ remote address cannot be added to request headers as the upstream host has not
+ been selected when custom request headers are generated."
+* `%DOWNSTREAM_TLS_VERSION%` is skipped if TLS is not in use
+* Envoy ignores REQ headers that refer to an non-existent header - for example
+ `%REQ(Host)%` works as expected but `%REQ(Missing-Header)%` is skipped
+
+Contour already sets the `X-Request-Start` request header to
+`t=%START_TIME(%s.%3f)%` which is the Unix epoch time when the request
+started.
+
+To enable setting header values based on the destination service Contour also supports:
+
+* `%CONTOUR_NAMESPACE%`
+* `%CONTOUR_SERVICE_NAME%`
+* `%CONTOUR_SERVICE_PORT%`
+
+For example, with the following HTTPProxy object that has a per-Service requestHeadersPolicy using these variables:
+```
+# httpproxy.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: basic
+ namespace: myns
+spec:
+ virtualhost:
+ fqdn: foo-basic.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+ requestHeadersPolicy:
+ set:
+ - name: l5d-dst-override
+ value: "%CONTOUR_SERVICE_NAME%.%CONTOUR_NAMESPACE%.svc.cluster.local:%CONTOUR_SERVICE_PORT%"
+```
+the values would be:
+* `CONTOUR_NAMESPACE: "myns"`
+* `CONTOUR_SERVICE_NAME: "s1"`
+* `CONTOUR_SERVICE_PORT: "80"`
+
+and the `l5-dst-override` header would be set to `s1.myns.svc.cluster.local:80`.
+
+For per-Route requestHeadersPolicy only `%CONTOUR_NAMESPACE%` is set and using
+`%CONTOUR_SERVICE_NAME%` and `%CONTOUR_SERVICE_PORT%` will end up as the
+literal values `%%CONTOUR_SERVICE_NAME%%` and `%%CONTOUR_SERVICE_PORT%%`,
+respectively.
+
+### Manipulating the Host header
+
+Contour allows users to manipulate the host header in two ways, using the `requestHeadersPolicy`.
+
+#### Static rewrite
+
+You can set the host to a static value. This can be done on the route and service level.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: static-host-header-rewrite-route
+spec:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+    requestHeadersPolicy:
+ set:
+ - name: host
+ value: foo.com
+```
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: static-host-header-rewrite-service
+spec:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+    requestHeadersPolicy:
+ set:
+ - name: host
+ value: "foo.com"
+```
+
+#### Dynamic rewrite
+
+You can also set the host header dynamically with the content of an existing header.
+The format has to be `"%REQ(<header-name>)%"`. If the header is empty, it is ignored.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: dynamic-host-header-rewrite-route
+spec:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+    requestHeadersPolicy:
+ set:
+ - name: host
+ value: "%REQ(x-rewrite-header)%"
+```
+
+Note: Only one of static or dynamic host rewrite can be specified.
+
+Note: Dynamic rewrite is only available at the route level and not possible on the service level.
+
+Note: Pay attention to the potential security implications of using this option, the provided header must come from a trusted source.
+
+Note: The header rewrite is only done while forwarding and has no bearing on the routing decision.
diff --git a/site/content/docs/1.29/config/request-routing.md b/site/content/docs/1.29/config/request-routing.md
new file mode 100644
index 00000000000..19ef5386e86
--- /dev/null
+++ b/site/content/docs/1.29/config/request-routing.md
@@ -0,0 +1,535 @@
+# Request Routing
+
+A HTTPProxy object must have at least one route or include defined.
+In this example, any requests to `multi-path.bar.com/blog` or `multi-path.bar.com/blog/*` will be routed to the Service `s2` using the prefix conditions. Requests to `multi-path.bar.com/feed` will be routed to Service `s2` using exact match condition.
+All other requests to the host `multi-path.bar.com` will be routed to the Service `s1`.
+
+```yaml
+# httpproxy-multiple-paths.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-paths
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: multi-path.bar.com
+ routes:
+ - conditions:
+ - prefix: / # matches everything else
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /blog # matches `multi-path.bar.com/blog` or `multi-path.bar.com/blog/*`
+ services:
+ - name: s2
+ port: 80
+ - conditions:
+ - exact: /feed # matches `multi-path.bar.com/feed` only
+ services:
+ - name: s2
+ port: 80
+```
+
+In the following example, we match on headers and query parameters and send to different services, with a default route if those do not match.
+
+```yaml
+# httpproxy-multiple-headers.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-paths
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: multi-path.bar.com
+ routes:
+ - conditions:
+ - header:
+ name: x-os
+ contains: ios
+ services:
+ - name: s1
+ port: 80
+ - conditions:
+ - header:
+ name: x-os
+ contains: android
+ services:
+ - name: s2
+ port: 80
+ - conditions:
+ - queryParameter:
+ name: os
+ exact: other
+ ignoreCase: true
+ services:
+ - name: s3
+ port: 80
+ - services:
+ - name: s4
+ port: 80
+```
+
+## Conditions
+
+Each Route entry in a HTTPProxy **may** contain one or more conditions.
+These conditions are combined with an AND operator on the route passed to Envoy.
+Conditions can be either a `prefix`, `exact`, `regex`, `header` or a `queryParameter` condition. At most one of `prefix`, `exact` or `regex` can be used in one condition block.
+
+#### Prefix conditions
+
+Paths defined are matched using prefix conditions.
+Up to one prefix condition may be present in any condition block.
+
+Prefix conditions **must** start with a `/` if they are present.
+
+#### Exact conditions
+
+Paths defined are matched using exact conditions.
+Up to one exact condition may be present in any condition block. Any condition block can
+either have a regex condition, exact condition or prefix condition, but not multiple together. Exact conditions are
+only allowed in route match conditions and not in include match conditions.
+
+Exact conditions **must** start with a `/` if they are present.
+
+#### Regex conditions
+
+Paths defined are matched using regex expressions.
+Up to one regex condition may be present in any condition block. Any condition block can
+either have a regex condition, exact condition or prefix condition, but not multiple together. Regex conditions are
+only allowed in route match conditions and not in include match conditions.
+
+Regex conditions **must** start with a `/` if they are present.
+
+#### Header conditions
+
+For `header` conditions there is the following structure:
+
+1. one required field, `name`
+2. seven operator fields: `present`, `notpresent`, `contains`, `notcontains`, `exact`, `notexact`, and `regex`
+3. two optional modifiers: `ignoreCase` and `treatMissingAsEmpty`
+
+Operators:
+- `present` is a boolean and checks that the header is present. The value will not be checked.
+
+- `notpresent` similarly checks that the header is *not* present.
+
+- `contains` is a string, and checks that the header contains the string. `notcontains` similarly checks that the header does *not* contain the string.
+
+- `exact` is a string, and checks that the header exactly matches the whole string. `notexact` checks that the header does *not* exactly match the whole string.
+
+- `regex` is a string representing a regular expression, and checks that the header value matches against the given regular expression.
+
+Modifiers:
+- `ignoreCase`: IgnoreCase specifies that string matching should be case insensitive. It has no effect on the `Regex` parameter.
+- `treatMissingAsEmpty`: specifies if the header match rule specified header does not exist, this header value will be treated as empty. Defaults to false. Unlike the underlying Envoy implementation this is **only** supported for negative matches (e.g. NotContains, NotExact).
+
+#### Query parameter conditions
+
+Similar to the `header` conditions, `queryParameter` conditions also require the
+`name` field to be specified, which represents the name of the query parameter
+e.g. `search` when the query string looks like `/?search=term` and `term`
+representing the value.
+
+There are six operator fields: `exact`, `prefix`, `suffix`, `regex`, `contains`
+and `present` and a modifier `ignoreCase` which can be used together with all of
+the operator fields except `regex` and `present`.
+
+- `exact` is a string, and checks that the query parameter value exactly matches
+ the whole string.
+
+- `prefix` is a string, and checks that the query parameter value is prefixed by
+ the given value.
+
+- `suffix` is a string, and checks that the query parameter value is suffixed by
+ the given value.
+
+- `regex` is a string representing a regular expression, and checks that the
+ query parameter value matches against the given regular expression.
+
+- `contains` is a string, and checks that the query parameter value contains
+ the given string.
+
+- `present` is a boolean, and checks that the query parameter is present. The
+ value will not be checked.
+
+- `ignoreCase` is a boolean, and if set to `true` it will enable case
+ insensitive matching for any of the string operator matching methods.
+
+## Request Redirection
+
+HTTP redirects can be implemented in HTTPProxy using `requestRedirectPolicy` on a route.
+In the following basic example, requests to `example.com` are redirected to `www.example.com`.
+We configure a root HTTPProxy for `example.com` that contains redirect configuration.
+We also configure a root HTTPProxy for `www.example.com` that represents the destination of the redirect.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: example-com
+spec:
+ virtualhost:
+ fqdn: example.com
+ routes:
+ - conditions:
+ - prefix: /
+ requestRedirectPolicy:
+ hostname: www.example.com
+```
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: www-example-com
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1
+ port: 80
+```
+
+In addition to specifying the hostname to set in the `location` header, the scheme, port, and returned status code of the redirect response can be configured.
+Configuration of the path or a path prefix replacement to modify the path of the returned `location` can be included as well.
+See [the API specification][3] for more detail.
+
+## Multiple Upstreams
+
+One of the key HTTPProxy features is the ability to support multiple services for a given path:
+
+```yaml
+# httpproxy-multiple-upstreams.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-upstreams
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: multi.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ - name: s2
+ port: 80
+```
+
+In this example, requests for `multi.bar.com/` will be load balanced across two Kubernetes Services, `s1`, and `s2`.
+This is helpful when you need to split traffic for a given URL across two different versions of an application.
+
+### Upstream Weighting
+
+Building on multiple upstreams is the ability to define relative weights for upstream Services.
+This is commonly used for canary testing of new versions of an application when you want to send a small fraction of traffic to a specific Service.
+
+```yaml
+# httpproxy-weight-shifting.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: weight-shifting
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: weights.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ weight: 10
+ - name: s2
+ port: 80
+ weight: 90
+```
+
+In this example, we are sending 10% of the traffic to Service `s1`, while Service `s2` receives the remaining 90% of traffic.
+
+HTTPProxy weighting follows some specific rules:
+
+- If no weights are specified for a given route, it's assumed even distribution across the Services.
+- Weights are relative and do not need to add up to 100. If all weights for a route are specified, then the "total" weight is the sum of those specified. As an example, if weights are 20, 30, 20 for three upstreams, the total weight would be 70. In this example, a weight of 30 would receive approximately 42.9% of traffic (30/70 = .4285).
+- If some weights are specified but others are not, then it's assumed that upstreams without weights have an implicit weight of zero, and thus will not receive traffic.
+
+### Traffic mirroring
+
+Per route, a service can be nominated as a mirror.
+The mirror service will receive a copy of the traffic sent to any non-mirror service.
+The mirror traffic is considered _read only_, any response by the mirror will be discarded.
+
+This service can be useful for recording traffic for later replay or for smoke testing new deployments.
+
+`weight` can be optionally set (in the space of integers 1-100) to mirror the corresponding percent of traffic (ie. `weight: 5` mirrors 5% of traffic). Omitting the `weight` field results in 100% traffic mirroring. Note the unexpected behavior if `weight` is explicitly set to 0: 100% of traffic will still be mirrored. This occurs because we cannot distinguish undefined variables from explicitly setting them to default values, and omission of a `weight` must mirror full traffic.
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: traffic-mirror
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: www
+ port: 80
+ - name: www-mirror
+ port: 80
+ mirror: true
+```
+
+## Response Timeouts
+
+Each Route can be configured to have a timeout policy and a retry policy as shown:
+
+```yaml
+# httpproxy-response-timeout.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: response-timeout
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: timeout.bar.com
+ routes:
+ - timeoutPolicy:
+ response: 1s
+ idle: 10s
+ idleConnection: 60s
+ retryPolicy:
+ count: 3
+ perTryTimeout: 150ms
+ services:
+ - name: s1
+ port: 80
+```
+
+In this example, requests to `timeout.bar.com/` will have a response timeout policy of 1s.
+This refers to the time that spans between the point at which the complete client request has been processed by the proxy and the point at which the response from the server has been completely processed.
+
+- `timeoutPolicy.response` Timeout for receiving a response from the server after processing a request from client.
+If not supplied, Envoy's default value of 15s applies.
+More information can be found in [Envoy's documentation][4].
+- `timeoutPolicy.idle` Timeout for how long the proxy should wait while there is no activity during single request/response (for HTTP/1.1) or stream (for HTTP/2).
+Timeout will not trigger while HTTP/1.1 connection is idle between two consecutive requests.
+If not specified, there is no per-route idle timeout, though a connection manager-wide stream idle timeout default of 5m still applies.
+More information can be found in [Envoy's documentation][6].
+- `timeoutPolicy.idleConnection` Timeout for how long connection from the proxy to the upstream service is kept when there are no active requests.
+If not supplied, Envoy’s default value of 1h applies.
+More information can be found in [Envoy's documentation][8].
+
+TimeoutPolicy durations are expressed in the Go [Duration format][5].
+Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+The string "infinity" is also a valid input and specifies no timeout.
+A value of "0s" will be treated as if the field were not set, i.e. by using Envoy's default behavior.
+Example input values: "300ms", "5s", "1m".
+
+- `retryPolicy`: A retry will be attempted if the server returns an error code in the 5xx range, or if the server takes more than `retryPolicy.perTryTimeout` to process a request.
+
+- `retryPolicy.count` specifies the maximum number of retries allowed. This parameter is optional and defaults to 1. Set to -1 to disable. If set to 0, the Envoy default of 1 is used.
+
+- `retryPolicy.perTryTimeout` specifies the timeout per retry. If this field is greater than the request timeout, it is ignored. This parameter is optional.
+ If left unspecified, `timeoutPolicy.response` will be used.
+
+## Load Balancing Strategy
+
+Each route can have a load balancing strategy applied to determine which of its Endpoints is selected for the request.
+The following list are the options available to choose from:
+
+- `RoundRobin`: Each healthy upstream Endpoint is selected in round-robin order (Default strategy if none selected).
+- `WeightedLeastRequest`: The least request load balancer uses different algorithms depending on whether hosts have the same or different weights in an attempt to route traffic based upon the number of active requests or the load at the time of selection.
+- `Random`: The random strategy selects a random healthy Endpoint.
+- `RequestHash`: The request hashing strategy allows for load balancing based on request attributes. An upstream Endpoint is selected based on the hash of an element of a request. For example, requests that contain a consistent value in an HTTP request header will be routed to the same upstream Endpoint. Currently, only hashing of HTTP request headers, query parameters and the source IP of a request is supported.
+- `Cookie`: The cookie load balancing strategy is similar to the request hash strategy and is a convenience feature to implement session affinity, as described below.
+
+More information on the load balancing strategy can be found in [Envoy's documentation][7].
+
+The following example defines the strategy for the route `/` as `WeightedLeastRequest`.
+
+```yaml
+# httpproxy-lb-strategy.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: lb-strategy
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: strategy.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: s1-strategy
+ port: 80
+ - name: s2-strategy
+ port: 80
+ loadBalancerPolicy:
+ strategy: WeightedLeastRequest
+```
+
+The below example demonstrates how request hash load balancing policies can be configured:
+
+Request hash headers
+```yaml
+# httpproxy-lb-request-hash.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: lb-request-hash
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: request-hash.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: httpbin
+ port: 8080
+ loadBalancerPolicy:
+ strategy: RequestHash
+ requestHashPolicies:
+ - headerHashOptions:
+ headerName: X-Some-Header
+ terminal: true
+ - headerHashOptions:
+ headerName: User-Agent
+ - hashSourceIP: true
+```
+In this example, if a client request contains the `X-Some-Header` header, the value of the header will be hashed and used to route to an upstream Endpoint. This could be used to implement a similar workflow to cookie-based session affinity by passing a consistent value for this header. If it is present, because it is set as a `terminal` hash option, Envoy will not continue on to process to `User-Agent` header or source IP to calculate a hash. If `X-Some-Header` is not present, Envoy will use the `User-Agent` header value to make a routing decision along with the source IP of the client making the request. These policies can be used alone or as shown for an advanced routing decision.
+
+
+Request hash source ip
+```yaml
+# httpproxy-lb-request-hash-ip.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: lb-request-hash
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: request-hash.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: httpbin
+ port: 8080
+ loadBalancerPolicy:
+ strategy: RequestHash
+ requestHashPolicies:
+ - hashSourceIP: true
+```
+
+Request hash query parameters
+```yaml
+# httpproxy-lb-request-hash.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: lb-request-hash
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: request-hash.bar.com
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: httpbin
+ port: 8080
+ loadBalancerPolicy:
+ strategy: RequestHash
+ requestHashPolicies:
+ - queryParameterHashOptions:
+ parameterName: param1
+ terminal: true
+ - queryParameterHashOptions:
+ parameterName: param2
+```
+
+## Session Affinity
+
+Session affinity, also known as _sticky sessions_, is a load balancing strategy whereby a sequence of requests from a single client are consistently routed to the same application backend.
+Contour supports session affinity on a per-route basis with `loadBalancerPolicy` `strategy: Cookie`.
+
+```yaml
+# httpproxy-sticky-sessions.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: httpbin
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: httpbin.davecheney.com
+ routes:
+ - services:
+ - name: httpbin
+ port: 8080
+ loadBalancerPolicy:
+ strategy: Cookie
+```
+
+Session affinity is based on the premise that the backend servers are robust, do not change ordering, or grow and shrink according to load.
+None of these properties are guaranteed by a Kubernetes cluster and will be visible to applications that rely heavily on session affinity.
+
+Any perturbation in the set of pods backing a service risks redistributing backends around the hash ring.
+
+## Internal Redirects
+
+HTTPProxy supports handling 3xx redirects internally, that is capturing a configurable 3xx redirect response, synthesizing a new request, sending it to the upstream specified by the new route match, and returning the redirected response as the response to the original request.
+
+Internal redirects can be enabled in HTTPProxy by defining an `internalRedirectPolicy` on a route.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: myservice
+ namespace: prod
+spec:
+ virtualhost:
+ fqdn: foo.com
+ routes:
+ - conditions:
+ - prefix: /download
+ services:
+ - name: foo
+ port: 8080
+ internalRedirectPolicy:
+ maxInternalRedirects: 5
+ redirectResponseCodes: [ 302 ]
+ allowCrossSchemeRedirect: SafeOnly
+ denyRepeatedRouteRedirect: true
+```
+
+In this example, a sample redirect flow might look like this:
+
+1. Client sends a `GET` request for http://foo.com/download.
+2. Upstream `foo` returns a `302` response with `location: http://foo.com/myfile`.
+3. Envoy looks up a route for http://foo.com/myfile and sends a new `GET` request to the corresponding upstream with the additional request header `x-envoy-original-url: http://foo.com/download`.
+4. Envoy proxies the response data for http://foo.com/myfile to the client as the response to the original request.
+
+See [the API specification][9] and [Envoy's documentation][10] for more detail.
+
+[3]: /docs/{{< param version >}}/config/api/#projectcontour.io/v1.HTTPRequestRedirectPolicy
+[4]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-routeaction-timeout
+[5]: https://godoc.org/time#ParseDuration
+[6]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/route/v3/route_components.proto#envoy-v3-api-field-config-route-v3-routeaction-idle-timeout
+[7]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/overview
+[8]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-field-config-core-v3-httpprotocoloptions-idle-timeout
+[9]: /docs/{{< param version >}}/config/api/#projectcontour.io/v1.HTTPInternalRedirectPolicy
+[10]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http/http_connection_management.html#internal-redirects
diff --git a/site/content/docs/1.29/config/slow-start.md b/site/content/docs/1.29/config/slow-start.md
new file mode 100644
index 00000000000..b44cc18fdc3
--- /dev/null
+++ b/site/content/docs/1.29/config/slow-start.md
@@ -0,0 +1,39 @@
+# Slow Start Mode
+
+Slow start mode is a configuration setting that is used to gradually increase the amount of traffic targeted to a newly added upstream endpoint.
+By default, the amount of traffic will increase linearly for the duration of time window set by `window` field, starting from 10% of the target load balancing weight and increasing to 100% gradually.
+The easing function for the traffic increase can be adjusted by setting optional field `aggression`.
+A value above 1.0 results in a more aggressive increase initially, slowing down when nearing the end of the time window.
+Value below 1.0 results in slow initial increase, picking up speed when nearing the end of the time window.
+Optional field `minWeightPercent` can be set to change the minimum percent of target weight.
+It is used to avoid a new weight that is too small, which may cause the endpoint to receive no traffic at the beginning of the slow start window.
+
+Slow start mode can be useful for example with JVM based applications, that might otherwise get overwhelmed during JIT warm-up period.
+Such applications may respond to requests slowly or return errors immediately after pod start or after container restarts.
+User impact of this behavior can be mitigated by using slow start configuration to gradually increase traffic to recently started service endpoints.
+
+The following example configures slow start mode for a service:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: slow-start
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ routes:
+ - services:
+ - name: java-app
+ port: 80
+ slowStartPolicy:
+ window: 3s
+ aggression: "1.0"
+ minWeightPercent: 10
+```
+
+Slow start mode works only with `RoundRobin` and `WeightedLeastRequest` [load balancing strategies][2].
+For more details see [Envoy documentation][1].
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/slow_start
+[2]: api/#projectcontour.io/v1.LoadBalancerPolicy
diff --git a/site/content/docs/1.29/config/tls-delegation.md b/site/content/docs/1.29/config/tls-delegation.md
new file mode 100644
index 00000000000..155796fe7eb
--- /dev/null
+++ b/site/content/docs/1.29/config/tls-delegation.md
@@ -0,0 +1,79 @@
+# TLS Certificate Delegation
+
+In order to support wildcard certificates, TLS certificates for a `*.somedomain.com`, which are stored in a namespace controlled by the cluster administrator, Contour supports a facility known as TLS Certificate Delegation.
+This facility allows the owner of a TLS certificate to delegate, for the purposes of referencing the TLS certificate, permission to Contour to read the Secret object from another namespace.
+Delegation works for both HTTPProxy and Ingress resources, however it needs an annotation to work with Ingress v1.
+
+If the `--watch-namespaces` configuration flag is used, it must define all namespaces that will be referenced by the delegation.
+
+The [`TLSCertificateDelegation`][1] resource defines a set of `delegations` in the `spec`.
+Each delegation references a `secretName` from the namespace where the `TLSCertificateDelegation` is created as well as describing a set of `targetNamespaces` in which the certificate can be referenced.
+If all namespaces should be able to reference the secret, then set `"*"` as the value of `targetNamespaces` (see example below).
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: TLSCertificateDelegation
+metadata:
+ name: example-com-wildcard
+ namespace: www-admin
+spec:
+ delegations:
+ - secretName: example-com-wildcard
+ targetNamespaces:
+ - example-com
+ - secretName: another-com-wildcard
+ targetNamespaces:
+ - "*"
+```
+
+In this example, the permission for Contour to reference the Secret `example-com-wildcard` in the `www-admin` namespace has been delegated to HTTPProxy and Ingress objects in the `example-com` namespace.
+Also, the permission for Contour to reference the Secret `another-com-wildcard` from all namespaces has been delegated to all HTTPProxy and Ingress objects in the cluster.
+
+To reference the secret from an HTTPProxy or Ingress v1beta1 you must use the slash syntax in the `secretName`:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: www
+ namespace: example-com
+spec:
+ virtualhost:
+ fqdn: foo2.bar.com
+ tls:
+ secretName: www-admin/example-com-wildcard
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+To reference the secret from an Ingress v1 you must use the `projectcontour.io/tls-cert-namespace` annotation:
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ annotations:
+ projectcontour.io/tls-cert-namespace: www-admin
+ name: www
+ namespace: example-com
+spec:
+ rules:
+ - host: foo2.bar.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: s1
+ port:
+ number: 80
+ tls:
+ - hosts:
+ - foo2.bar.com
+ secretName: example-com-wildcard
+```
+
+
+[0]: https://github.com/projectcontour/contour/issues/3544
+[1]: /docs/{{< param version >}}/config/api/#projectcontour.io/v1.TLSCertificateDelegation
diff --git a/site/content/docs/1.29/config/tls-termination.md b/site/content/docs/1.29/config/tls-termination.md
new file mode 100644
index 00000000000..d1b26dc2f4e
--- /dev/null
+++ b/site/content/docs/1.29/config/tls-termination.md
@@ -0,0 +1,353 @@
+# TLS Termination
+
+HTTPProxy follows a similar pattern to Ingress for configuring TLS credentials.
+
+You can secure a HTTPProxy by specifying a Secret that contains TLS private key and certificate information.
+If multiple HTTPProxies utilize the same Secret, the certificate must include the necessary Subject Alternative Name (SAN) for each fqdn.
+
+Contour (via Envoy) requires that clients send the Server Name Indication (SNI) TLS extension so that requests can be routed to the correct virtual host.
+Virtual hosts are strongly bound to SNI names.
+This means that the Host header in HTTP requests must match the SNI name that was sent at the start of the TLS session.
+
+Contour also follows a "secure first" approach.
+When TLS is enabled for a virtual host, any request to the insecure port is redirected to the secure interface with a 301 redirect.
+Specific routes can be configured to override this behavior and handle insecure requests by enabling the `spec.routes.permitInsecure` parameter on a Route.
+
+The TLS secret must:
+- be a Secret of type `kubernetes.io/tls`. This means that it must contain keys named `tls.crt` and `tls.key` that contain the certificate and private key to use for TLS, in PEM format.
+
+The TLS secret may also:
+- add any chain CA certificates required for validation into the `tls.crt` PEM bundle. If this is the case, the serving certificate must be the first certificate in the bundle and the intermediate CA certificates must be appended in issuing order.
+
+```yaml
+# ingress-tls.secret.yaml
+apiVersion: v1
+data:
+ tls.crt: base64 encoded cert
+ tls.key: base64 encoded key
+kind: Secret
+metadata:
+ name: testsecret
+ namespace: default
+type: kubernetes.io/tls
+```
+
+The HTTPProxy can be configured to use this secret using `tls.secretName` property:
+
+```yaml
+# httpproxy-tls.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: tls-example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: foo2.bar.com
+ tls:
+ secretName: testsecret
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+If the `tls.secretName` property contains a slash, eg. `somenamespace/somesecret` then, subject to TLS Certificate Delegation, the TLS certificate will be read from `somesecret` in `somenamespace`.
+See TLS Certificate Delegation below for more information.
+
+The TLS **Minimum Protocol Version** a virtual host should negotiate can be specified by setting the `spec.virtualhost.tls.minimumProtocolVersion`:
+
+- 1.3
+- 1.2 (Default)
+
+## Fallback Certificate
+
+Contour provides virtual host based routing, so that any TLS request is routed to the appropriate service based on both the server name requested by the TLS client and the HOST header in the HTTP request.
+
+Since the HOST Header is encrypted during TLS handshake, it can’t be used for virtual host based routing unless the client sends HTTPS requests specifying hostname using the TLS server name, or the request is first decrypted using a default TLS certificate.
+
+Some legacy TLS clients do not send the server name, so Envoy does not know how to select the right certificate. A fallback certificate is needed for these clients.
+
+_**Note:**
+The minimum TLS protocol version for any fallback request is defined by the `minimum TLS protocol version` set in the Contour configuration file.
+Enabling the fallback certificate is not compatible with TLS client authentication._
+
+### Fallback Certificate Configuration
+
+First define the `namespace/name` in the [Contour configuration file][1] of a Kubernetes secret which will be used as the fallback certificate.
+Any HTTPProxy which enables fallback certificate delegation must have the fallback certificate delegated to the namespace in which the HTTPProxy object resides.
+
+To do that, configure `TLSCertificateDelegation` to delegate the fallback certificate to specific or all namespaces (e.g. `*`) which should be allowed to enable the fallback certificate.
+Finally, for each root HTTPProxy, set the `Spec.TLS.enableFallbackCertificate` parameter to allow that HTTPProxy to opt-in to the fallback certificate routing.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: fallback-tls-example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: fallback.bar.com
+ tls:
+ secretName: testsecret
+ enableFallbackCertificate: true
+ routes:
+ - services:
+ - name: s1
+ port: 80
+---
+apiVersion: projectcontour.io/v1
+kind: TLSCertificateDelegation
+metadata:
+ name: fallback-delegation
+ namespace: www-admin
+spec:
+ delegations:
+ - secretName: fallback-secret-name
+ targetNamespaces:
+ - "*"
+```
+
+## Permitting Insecure Requests
+
+A HTTPProxy can be configured to permit insecure requests to specific Routes.
+In this example, any request to `foo2.bar.com/blog` will not receive a 301 redirect to HTTPS, but the `/` route will:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: tls-example-insecure
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: foo2.bar.com
+ tls:
+ secretName: testsecret
+ routes:
+ - services:
+ - name: s1
+ port: 80
+ - conditions:
+ - prefix: /blog
+ permitInsecure: true
+ services:
+ - name: s2
+ port: 80
+```
+
+## Client Certificate Validation
+
+It is possible to protect the backend service from unauthorized external clients by requiring the client to present a valid TLS certificate.
+Envoy will validate the client certificate by verifying that it is not expired and that a chain of trust can be established to the configured trusted root CA certificate.
+Only those requests with a valid client certificate will be accepted and forwarded to the backend service.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-client-auth
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+The preceding example enables validation by setting the optional `clientValidation` attribute.
+Its mandatory attribute `caSecret` contains a name of an existing Kubernetes Secret that must be of type "Opaque" and have only a data key named `ca.crt`.
+The data value of the key `ca.crt` must be a PEM-encoded certificate bundle and it must contain all the trusted CA certificates that are to be used for validating the client certificate.
+If the Opaque Secret also contains one of either `tls.crt` or `tls.key` keys, it will be ignored.
+
+By default, client certificates are required but some applications might support different authentication schemes. In that case you can set the `optionalClientCertificate` field to `true`. A client certificate will be requested, but the connection is allowed to continue if the client does not provide one. If a client certificate is sent, it will be verified according to the other properties, which includes disabling validations if `skipClientCertValidation` is set.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-optional-client-auth
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ optionalClientCertificate: true
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+When using external authorization, it may be desirable to use an external authorization server to validate client certificates on requests, rather than the Envoy proxy.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-client-auth-and-ext-authz
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ authorization:
+ # external authorization server configuration
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ skipClientCertValidation: true
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+In the above example, setting the `skipClientCertValidation` field to `true` will configure Envoy to require client certificates on requests and pass them along to a configured authorization server.
+Failed validation of client certificates by Envoy will be ignored and the `fail_verify_error` [Listener statistic][2] incremented.
+If the `caSecret` field is omitted, Envoy will request but not require client certificates to be present on requests.
+
+Optionally, you can enable certificate revocation check by providing one or more Certificate Revocation Lists (CRLs).
+Attribute `crlSecret` contains a name of an existing Kubernetes Secret that must be of type "Opaque" and have a data key named `crl.pem`.
+The data value of the key `crl.pem` must be one or more PEM-encoded CRLs concatenated together.
+Large CRL lists are not supported since individual Secrets are limited to 1MiB in size.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-client-auth-and-crl-check
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ crlSecret: client-crl
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+CRLs must be available from all relevant CAs, including intermediate CAs.
+Otherwise clients will be denied access, since the revocation status cannot be checked for the full certificate chain.
+This behavior can be controlled by `crlOnlyVerifyLeafCert` field.
+If the option is set to `true`, only the certificate at the end of the certificate chain will be subject to validation by CRL.
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-client-auth-and-crl-check-only-leaf
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ crlSecret: client-crl
+ crlOnlyVerifyLeafCert: true
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+## Client Certificate Details Forwarding
+
+HTTPProxy supports passing certificate data through the `x-forwarded-client-cert` header to let applications use details from client certificates (e.g. Subject, SAN...). Since the certificate (or the certificate chain) could exceed the web server header size limit, you have the ability to select what specific part of the certificate to expose in the header through the `forwardClientCertificate` field. Read more about the supported values in the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#x-forwarded-client-cert).
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: with-client-auth
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ tls:
+ secretName: secret
+ clientValidation:
+ caSecret: client-root-ca
+ forwardClientCertificate:
+ subject: true
+ cert: true
+ chain: true
+ dns: true
+ uri: true
+ routes:
+ - services:
+ - name: s1
+ port: 80
+```
+
+## TLS Session Proxying
+
+HTTPProxy supports proxying of TLS encapsulated TCP sessions.
+
+_Note_: The TCP session must be encrypted with TLS.
+This is necessary so that Envoy can use SNI to route the incoming request to the correct service.
+
+If `spec.virtualhost.tls.secretName` is present then that secret will be used to decrypt the TCP traffic at the edge.
+
+```yaml
+# httpproxy-tls-termination.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: tcp.example.com
+ tls:
+ secretName: secret
+ tcpproxy:
+ services:
+ - name: tcpservice
+ port: 8080
+ - name: otherservice
+ port: 9999
+ weight: 20
+```
+
+The `spec.tcpproxy` key indicates that this _root_ HTTPProxy will forward the de-encrypted TCP traffic to the backend service.
+
+### TLS Session Passthrough
+
+If you wish to handle the TLS handshake at the backend service, set `spec.virtualhost.tls.passthrough: true`. This indicates that once SNI demuxing is performed, the encrypted connection will be forwarded to the backend service.
+The backend service is expected to have a key which matches the SNI header received at the edge, and be capable of completing the TLS handshake. This is called SSL/TLS Passthrough.
+
+```yaml
+# httpproxy-tls-passthrough.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: example
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: tcp.example.com
+ tls:
+ passthrough: true
+ tcpproxy:
+ services:
+ - name: tcpservice
+ port: 8080
+ - name: otherservice
+ port: 9999
+ weight: 20
+```
+
+[1]: ../configuration#fallback-certificate
+[2]: https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/stats#tls-statistics
diff --git a/site/content/docs/1.29/config/tracing.md b/site/content/docs/1.29/config/tracing.md
new file mode 100644
index 00000000000..5500c26d547
--- /dev/null
+++ b/site/content/docs/1.29/config/tracing.md
@@ -0,0 +1,117 @@
+# Tracing Support
+
+- [Overview](#overview)
+- [Tracing-config](#tracing-config)
+
+## Overview
+
+Envoy has rich support for [distributed tracing][1], and supports exporting data to third-party providers (Zipkin, Jaeger, Datadog, etc.)
+
+[OpenTelemetry][2] is a CNCF project which is working to become a standard in the space. It was formed as a merger of the OpenTracing and OpenCensus projects.
+
+Contour supports configuring envoy to export data to OpenTelemetry, and allows users to customize some configurations.
+
+- Custom service name, the default is `contour`.
+- Custom sampling rate, the default is `100`.
+- Custom the maximum length of the request path, the default is `256`.
+- Customize span tags from literal or request headers.
+- Customize whether to include the pod's hostname and namespace.
+
+## Tracing-config
+
+In order to use this feature, you must first select and deploy an opentelemetry-collector to receive the tracing data exported by envoy.
+
+First we should deploy an opentelemetry-collector to receive the tracing data exported by envoy
+```bash
+# install operator
+kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
+```
+
+Install an otel collector instance, with verbose logging exporter enabled:
+```shell
+kubectl apply -f - </`. If the CA secret's namespace is not the same namespace as the `HTTPProxy` resource, [TLS Certificate Delegation][4] must be used to allow the owner of the CA certificate secret to delegate, for the purposes of referencing the CA certificate in a different namespace, permission to Contour to read the Secret object from another namespace.
+
+_**Note:**
+If `spec.routes.services[].validation` is present, `spec.routes.services[].{name,port}` must point to a Service with a matching `projectcontour.io/upstream-protocol.tls` Service annotation._
+
+In the example below, the upstream service is named `secure-backend` and uses port `8443`:
+
+```yaml
+# httpproxy-example.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: example
+spec:
+ virtualhost:
+ fqdn: www.example.com
+ routes:
+ - services:
+ - name: secure-backend
+ port: 8443
+ validation:
+ caSecret: my-certificate-authority
+ subjectName: backend.example.com
+```
+
+```yaml
+# service-secure-backend.yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: secure-backend
+ annotations:
+ projectcontour.io/upstream-protocol.tls: "8443"
+spec:
+ ports:
+ - name: https
+ port: 8443
+ selector:
+ app: secure-backend
+
+```
+
+If the `validation` spec is defined on a service, but the secret which it references does not exist, Contour will reject the update and set the status of the HTTPProxy object accordingly.
+This helps prevent the case of proxying to an upstream where validation is requested, but not yet available.
+
+```yaml
+Status:
+ Current Status: invalid
+ Description: route "/": service "tls-nginx": upstreamValidation requested but secret not found or misconfigured
+```
+
+## Upstream Validation
+
+When defining upstream services on a route, it's possible to configure the connection from Envoy to the backend endpoint to communicate over TLS.
+
+A CA certificate and a Subject Name must be provided, which are both used to verify the backend endpoint's identity.
+
+If specifying multiple Subject Names, `SubjectNames` and `SubjectName` must be configured such that `SubjectNames[0] == SubjectName`.
+
+The CA certificate bundle for the backend service should be supplied in a Kubernetes Secret.
+The referenced Secret must be of type "Opaque" and have a data key named `ca.crt`.
+This data value must be a PEM-encoded certificate bundle.
+
+In addition to the CA certificate and the subject name, the Kubernetes service must also be annotated with a Contour specific annotation: `projectcontour.io/upstream-protocol.tls: <port number>` ([see annotations section][1]).
+
+_**Note:** This annotation is applied to the Service not the Ingress or HTTPProxy object._
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: blog
+ namespace: marketing
+spec:
+ routes:
+ - services:
+ - name: s2
+ port: 80
+ validation:
+ caSecret: foo-ca-cert
+ subjectName: foo.marketing
+ subjectNames:
+ - foo.marketing
+ - bar.marketing
+```
+
+## Envoy Client Certificate
+
+Contour can be configured with a `namespace/name` in the [Contour configuration file][3] of a Kubernetes secret which Envoy uses as a client certificate when upstream TLS is configured for the backend.
+Envoy will send the certificate during TLS handshake when the backend applications request the client to present its certificate.
+Backend applications can validate the certificate to ensure that the connection is coming from Envoy.
+
+[1]: annotations.md
+[2]: api/#projectcontour.io/v1.Service
+[3]: ../configuration#fallback-certificate
+[4]: tls-delegation.md
diff --git a/site/content/docs/1.29/config/virtual-hosts.md b/site/content/docs/1.29/config/virtual-hosts.md
new file mode 100644
index 00000000000..b7a138dde6b
--- /dev/null
+++ b/site/content/docs/1.29/config/virtual-hosts.md
@@ -0,0 +1,138 @@
+# Virtual Hosts
+
+
+Similar to Ingress, HTTPProxy supports name-based virtual hosting.
+Name-based virtual hosts use multiple host names with the same IP address.
+
+```
+foo.bar.com --| |-> foo.bar.com s1:80
+ | 178.91.123.132 |
+bar.foo.com --| |-> bar.foo.com s2:80
+```
+
+Unlike Ingress however, HTTPProxy only supports a single root domain per HTTPProxy object.
+As an example, this Ingress object:
+
+```yaml
+# ingress-name.yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: name-example
+spec:
+ rules:
+ - host: foo1.bar.com
+ http:
+ paths:
+ - backend:
+ service:
+ name: s1
+ port:
+ number: 80
+ pathType: Prefix
+ - host: bar1.bar.com
+ http:
+ paths:
+ - backend:
+ service:
+ name: s2
+ port:
+ number: 80
+ pathType: Prefix
+```
+
+must be represented by two different HTTPProxy objects:
+
+```yaml
+# httpproxy-name.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: name-example-foo
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: foo1.bar.com
+ routes:
+ - services:
+ - name: s1
+ port: 80
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: name-example-bar
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: bar1.bar.com
+ routes:
+ - services:
+ - name: s2
+ port: 80
+```
+
+A HTTPProxy object that contains a [`virtualhost`][2] field is known as a "root proxy".
+
+## Virtualhost aliases
+
+To present the same set of routes under multiple DNS entries (e.g. `www.example.com` and `example.com`), including a service with a `prefix` condition of `/` can be used.
+
+```yaml
+# httpproxy-inclusion-multipleroots.yaml
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-root
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: bar.com
+ includes:
+ - name: main
+ namespace: default
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: multiple-root-www
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: www.bar.com
+ includes:
+ - name: main
+ namespace: default
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: main
+ namespace: default
+spec:
+ routes:
+ - services:
+ - name: s2
+ port: 80
+```
+
+## Restricted root namespaces
+
+HTTPProxy inclusion allows Administrators to limit which users/namespaces may configure routes for a given domain, but it does not restrict where root HTTPProxies may be created.
+Contour has an enforcing mode which accepts a list of namespaces where root HTTPProxies are valid.
+Only users permitted to operate in those namespaces can therefore create HTTPProxy with the [`virtualhost`][2] field ([see API docs][2]).
+
+This restricted mode is enabled in Contour by specifying a command line flag, `--root-namespaces`, which will restrict Contour to only searching the defined namespaces for root HTTPProxy. This CLI flag accepts a comma separated list of namespaces where HTTPProxy are valid (e.g. `--root-namespaces=default,kube-system,my-admin-namespace`).
+
+HTTPProxy with a defined [virtualhost][2] field that are not in one of the allowed root namespaces will be flagged as `invalid` and will be ignored by Contour.
+
+Additionally, when defined, Contour will only watch for Kubernetes secrets in these namespaces ignoring changes in all other namespaces.
+Proper RBAC rules should also be created to restrict what namespaces Contour has access to, matching the namespaces passed to the command line flag.
+An example of this is included in the [examples directory][1] and shows how you might create a namespace called `root-httproxy`.
+
+_**Note:** The restricted root namespace feature is only supported for HTTPProxy CRDs.
+`--root-namespaces` does not affect the operation of Ingress objects. In order to limit other resources, see the `--watch-namespaces` configuration flag._
+
+[1]: {{< param github_url>}}/tree/{{< param branch >}}/examples/root-rbac
+[2]: api/#projectcontour.io/v1.VirtualHost
diff --git a/site/content/docs/1.29/config/websockets.md b/site/content/docs/1.29/config/websockets.md
new file mode 100644
index 00000000000..136c0468378
--- /dev/null
+++ b/site/content/docs/1.29/config/websockets.md
@@ -0,0 +1,27 @@
+# Websockets
+
+WebSocket support can be enabled on specific routes using the `enableWebsockets` field:
+
+```yaml
+# httpproxy-websockets.yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: chat
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: chat.example.com
+ routes:
+ - services:
+ - name: chat-app
+ port: 80
+ - conditions:
+ - prefix: /websocket
+ enableWebsockets: true # Setting this to true enables websocket for all paths that match /websocket
+ services:
+ - name: chat-app
+ port: 80
+```
+
+If you are using Gateway API, websockets are enabled by default at the Listener level.
diff --git a/site/content/docs/1.29/configuration.md b/site/content/docs/1.29/configuration.md
new file mode 100644
index 00000000000..8e20de93fe5
--- /dev/null
+++ b/site/content/docs/1.29/configuration.md
@@ -0,0 +1,541 @@
+# Contour Configuration Reference
+
+- [Serve Flags](#serve-flags)
+- [Configuration File](#configuration-file)
+- [Environment Variables](#environment-variables)
+- [Bootstrap Config File](#bootstrap-config-file)
+
+## Overview
+
+There are various ways to configure Contour: flags, the configuration file, and environment variables.
+Contour has a precedence of configuration for `contour serve`, meaning anything configured in the config file is overridden by environment variables, which are in turn overridden by CLI flags.
+
+## Serve Flags
+
+The `contour serve` command is the main command which is used to watch for Kubernetes resources and process them into Envoy configuration which is then streamed to any Envoy via its xDS gRPC connection.
+There are a number of flags that can be passed to this command which further configures how Contour operates.
+Many of these flags are mirrored in the [Contour Configuration File](#configuration-file).
+
+| Flag Name | Description |
+| --------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
+| `--config-path` | Path to base configuration |
+| `--contour-config-name` | Name of the ContourConfiguration resource to use |
+| `--incluster` | Use in cluster configuration |
+| `--kubeconfig=`                                                 | Path to kubeconfig (if not running inside a cluster)                                     |
+| `--xds-address=` | xDS gRPC API address |
+| `--xds-port=` | xDS gRPC API port |
+| `--stats-address=` | Envoy /stats interface address |
+| `--stats-port=` | Envoy /stats interface port |
+| `--debug-http-address=` | Address the debug http endpoint will bind to. |
+| `--debug-http-port=` | Port the debug http endpoint will bind to |
+| `--http-address=` | Address the metrics HTTP endpoint will bind to |
+| `--http-port=` | Port the metrics HTTP endpoint will bind to. |
+| `--health-address=` | Address the health HTTP endpoint will bind to |
+| `--health-port=` | Port the health HTTP endpoint will bind to |
+| `--contour-cafile=` | CA bundle file name for serving gRPC with TLS |
+| `--contour-cert-file=` | Contour certificate file name for serving gRPC over TLS |
+| `--contour-key-file=` | Contour key file name for serving gRPC over TLS |
+| `--insecure` | Allow serving without TLS secured gRPC |
+| `--root-namespaces=` | Restrict contour to searching these namespaces for root ingress routes |
+| `--watch-namespaces=` | Restrict contour to searching these namespaces for all resources |
+| `--ingress-class-name=` | Contour IngressClass name (comma-separated list allowed) |
+| `--ingress-status-address=` | Address to set in Ingress object status |
+| `--envoy-http-access-log=` | Envoy HTTP access log |
+| `--envoy-https-access-log=` | Envoy HTTPS access log |
+| `--envoy-service-http-address=` | Kubernetes Service address for HTTP requests |
+| `--envoy-service-https-address=` | Kubernetes Service address for HTTPS requests |
+| `--envoy-service-http-port=` | Kubernetes Service port for HTTP requests |
+| `--envoy-service-https-port=` | Kubernetes Service port for HTTPS requests |
+| `--envoy-service-name=` | Name of the Envoy service to inspect for Ingress status details. |
+| `--envoy-service-namespace=` | Envoy Service Namespace |
+| `--use-proxy-protocol` | Use PROXY protocol for all listeners |
+| `--accesslog-format=` | Format for Envoy access logs |
+| `--disable-leader-election` | Disable leader election mechanism |
+| `--disable-feature=` | Do not start an informer for the specified resources. Flag can be given multiple times. |
+| `--leader-election-lease-duration` | The duration of the leadership lease. |
+| `--leader-election-renew-deadline` | The duration leader will retry refreshing leadership before giving up. |
+| `--leader-election-retry-period` | The interval which Contour will attempt to acquire leadership lease. |
+| `--leader-election-resource-name` | The name of the resource (Lease) leader election will lease. |
+| `--leader-election-resource-namespace` | The namespace of the resource (Lease) leader election will lease. |
+| `-d, --debug` | Enable debug logging |
+| `--kubernetes-debug=` | Enable Kubernetes client debug logging |
+| `--log-format=` | Log output format for Contour. Either text (default) or json. |
+| `--kubernetes-client-qps=` | QPS allowed for the Kubernetes client. |
+| `--kubernetes-client-burst=` | Burst allowed for the Kubernetes client. |
+
+## Configuration File
+
+A configuration file can be passed to the `--config-path` argument of the `contour serve` command to specify additional configuration to Contour.
+In most deployments, this file is passed to Contour via a ConfigMap which is mounted as a volume to the Contour pod.
+
+The Contour configuration file is optional.
+In its absence, Contour will operate with reasonable defaults.
+Where Contour settings can also be specified with command-line flags, the command-line value takes precedence over the configuration file.
+
+| Field Name | Type | Default | Description |
+|---------------------------| ---------------------- |------------------------------------------------------------------------------------------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| accesslog-format | string | `envoy` | This key sets the global [access log format][2] for Envoy. Valid options are `envoy` or `json`. |
+| accesslog-format-string | string | None | If present, this specifies custom access log format for Envoy. See [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage) for more information about the syntax. This field only has effect if `accesslog-format` is `envoy` |
+| accesslog-level | string | `info` | This field specifies the verbosity level of the access log. Valid options are `info` (default, all requests are logged), `error` (all non-success, i.e. 300+ response code, requests are logged), `critical` (all server error, i.e. 500+ response code, requests are logged) and `disabled`. |
+| debug | boolean | `false` | Enables debug logging. |
+| default-http-versions     | string array           | `HTTP/1.1` `HTTP/2`                                                                                    | This array specifies the HTTP versions that Contour should program Envoy to serve. HTTP versions are specified as strings of the form "HTTP/x", where "x" represents the version number. |
+| disableAllowChunkedLength | boolean | `false` | If this field is true, Contour will disable the RFC-compliant Envoy behavior to strip the `Content-Length` header if `Transfer-Encoding: chunked` is also set. This is an emergency off-switch to revert back to Envoy's default behavior in case of failures.
+| disableMergeSlashes | boolean | `false` | This field disables Envoy's non-standard merge_slashes path transformation behavior that strips duplicate slashes from request URL paths.
+| serverHeaderTransformation | string | `overwrite` | This field defines the action to be applied to the Server header on the response path. Values: `overwrite` (default), `append_if_absent`, `pass_through`
+| disablePermitInsecure | boolean | `false` | If this field is true, Contour will ignore `PermitInsecure` field in HTTPProxy documents. |
+| envoy-service-name | string | `envoy` | This sets the service name that will be inspected for address details to be applied to Ingress objects. |
+| envoy-service-namespace | string | `projectcontour` | This sets the namespace of the service that will be inspected for address details to be applied to Ingress objects. If the `CONTOUR_NAMESPACE` environment variable is present, Contour will populate this field with its value. |
+| ingress-status-address | string | None | If present, this specifies the address that will be copied into the Ingress status for each Ingress that Contour manages. It is exclusive with `envoy-service-name` and `envoy-service-namespace`. |
+| incluster | boolean | `false` | This field specifies that Contour is running in a Kubernetes cluster and should use the in-cluster client access configuration. |
+| json-fields | string array | [fields][5] | This is the list the field names to include in the JSON [access log format][2]. This field only has effect if `accesslog-format` is `json`. |
+| kubeconfig | string | `$HOME/.kube/config` | Path to a Kubernetes [kubeconfig file][3] for when Contour is executed outside a cluster. |
+| kubernetesClientQPS | float32 | | QPS allowed for the Kubernetes client. |
+| kubernetesClientBurst | int | | Burst allowed for the Kubernetes client. |
+| policy | PolicyConfig | | The default [policy configuration](#policy-configuration). |
+| tls | TLS | | The default [TLS configuration](#tls-configuration). |
+| timeouts | TimeoutConfig | | The [timeout configuration](#timeout-configuration). |
+| cluster | ClusterConfig | | The [cluster configuration](#cluster-configuration). |
+| network | NetworkConfig | | The [network configuration](#network-configuration). |
+| listener | ListenerConfig | | The [listener configuration](#listener-configuration). |
+| server | ServerConfig | | The [server configuration](#server-configuration) for `contour serve` command. |
+| gateway | GatewayConfig | | The [gateway-api Gateway configuration](#gateway-configuration). |
+| rateLimitService | RateLimitServiceConfig | | The [rate limit service configuration](#rate-limit-service-configuration). |
+| enableExternalNameService | boolean | `false` | Enable ExternalName Service processing. Enabling this has security implications. Please see the [advisory](https://github.com/projectcontour/contour/security/advisories/GHSA-5ph6-qq5x-7jwc) for more details. |
+| metrics | MetricsParameters | | The [metrics configuration](#metrics-configuration) |
+| featureFlags              | string array           | `[]`                                                                                                   | Defines the toggle to enable new contour features. Available toggles are: <br> 1. `useEndpointSlices` - configures contour to fetch endpoint data from k8s endpoint slices. |
+
+### TLS Configuration
+
+The TLS configuration block can be used to configure default values for how
+Contour should provision TLS hosts.
+
+| Field Name | Type | Default | Description |
+| ------------------------ | -------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| minimum-protocol-version | string | `1.2` | This field specifies the minimum TLS protocol version that is allowed. Valid options are `1.2` (default) and `1.3`. Any other value defaults to TLS 1.2.
+| maximum-protocol-version | string | `1.3` | This field specifies the maximum TLS protocol version that is allowed. Valid options are `1.2` and `1.3`. Any other value defaults to TLS 1.3. |
+| fallback-certificate | | | [Fallback certificate configuration](#fallback-certificate). |
+| envoy-client-certificate | | | [Client certificate configuration for Envoy](#envoy-client-certificate). |
+| cipher-suites | []string | See [config package documentation](https://pkg.go.dev/github.com/projectcontour/contour/pkg/config#pkg-variables) | This field specifies the TLS ciphers to be supported by TLS listeners when negotiating TLS 1.2. This parameter should only be used by advanced users. Note that this is ignored when TLS 1.3 is in use. The set of ciphers that are allowed is a superset of those supported by default in stock, non-FIPS Envoy builds and FIPS builds as specified [here](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites). Custom ciphers not accepted by Envoy in a standard build are not supported. |
+
+### Upstream TLS Configuration
+
+The Upstream TLS configuration block can be used to configure default values for how Contour establishes TLS for upstream connections.
+
+| Field Name | Type | Default | Description |
+| ------------------------ | -------- | ----------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| minimum-protocol-version | string | `1.2` | This field specifies the minimum TLS protocol version that is allowed. Valid options are `1.2` (default) and `1.3`. Any other value defaults to TLS 1.2. |
+| maximum-protocol-version | string | `1.3` | This field specifies the maximum TLS protocol version that is allowed. Valid options are `1.2` and `1.3`. Any other value defaults to TLS 1.3. |
+| cipher-suites | []string | See [config package documentation](https://pkg.go.dev/github.com/projectcontour/contour/pkg/config#pkg-variables) | This field specifies the TLS ciphers to be supported by TLS listeners when negotiating TLS 1.2. This parameter should only be used by advanced users. Note that this is ignored when TLS 1.3 is in use. The set of ciphers that are allowed is a superset of those supported by default in stock, non-FIPS Envoy builds and FIPS builds as specified [here](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites). Custom ciphers not accepted by Envoy in a standard build are not supported. |
+
+### Fallback Certificate
+
+| Field Name | Type | Default | Description |
+| ---------- | ------ | ------- | ----------------------------------------------------------------------------------------------- |
+| name | string | `""` | This field specifies the name of the Kubernetes secret to use as the fallback certificate. |
+| namespace | string | `""` | This field specifies the namespace of the Kubernetes secret to use as the fallback certificate. |
+
+
+### Envoy Client Certificate
+
+| Field Name | Type | Default | Description |
+| ---------- | ------ | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| name | string | `""` | This field specifies the name of the Kubernetes secret to use as the client certificate and private key when establishing TLS connections to the backend service. |
+| namespace | string | `""` | This field specifies the namespace of the Kubernetes secret to use as the client certificate and private key when establishing TLS connections to the backend service. |
+
+
+### Timeout Configuration
+
+The timeout configuration block can be used to configure various timeouts for the proxies. All fields are optional; Contour/Envoy defaults apply if a field is not specified.
+
+| Field Name | Type | Default | Description |
+| -------------------------------- | ------ | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| request-timeout                  | string | none*   | This field specifies the default request timeout. Note that this is a timeout for the entire request, not an idle timeout. Must be a [valid Go duration string][4], or omitted or set to `infinity` to disable the timeout entirely. See [the Envoy documentation][12] for more information. <br> _Note: A value of `0s` previously disabled this timeout entirely. This is no longer the case. Use `infinity` or omit this field to disable the timeout._ |
+| connection-idle-timeout | string | `60s` | This field defines how long the proxy should wait while there are no active requests (for HTTP/1.1) or streams (for HTTP/2) before terminating an HTTP connection. The timeout applies to downstream connections only. Must be a [valid Go duration string][4], or `infinity` to disable the timeout entirely. See [the Envoy documentation][8] for more information. |
+| stream-idle-timeout | string | `5m`* | This field defines how long the proxy should wait while there is no activity during single request/response (for HTTP/1.1) or stream (for HTTP/2). Timeout will not trigger while HTTP/1.1 connection is idle between two consecutive requests. Must be a [valid Go duration string][4], or `infinity` to disable the timeout entirely. See [the Envoy documentation][9] for more information. |
+| max-connection-duration | string | none* | This field defines the maximum period of time after an HTTP connection has been established from the client to the proxy before it is closed by the proxy, regardless of whether there has been activity or not. Must be a [valid Go duration string][4], or omitted or set to `infinity` for no max duration. See [the Envoy documentation][10] for more information. |
+| delayed-close-timeout            | string | `1s`*   | *Note: this is an advanced setting that should not normally need to be tuned.* <br> This field defines how long envoy will wait, once connection close processing has been initiated, for the downstream peer to close the connection before Envoy closes the socket associated with the connection. Setting this timeout to 'infinity' will disable it. See [the Envoy documentation][13] for more information. |
+| connection-shutdown-grace-period | string | `5s`* | This field defines how long the proxy will wait between sending an initial GOAWAY frame and a second, final GOAWAY frame when terminating an HTTP/2 connection. During this grace period, the proxy will continue to respond to new streams. After the final GOAWAY frame has been sent, the proxy will refuse new streams. Must be a [valid Go duration string][4]. See [the Envoy documentation][11] for more information. |
+| connect-timeout                  | string | `2s`    | This field defines how long the proxy will wait for the upstream connection to be established. |
+
+_This is Envoy's default setting value and is not explicitly configured by Contour._
+
+### Cluster Configuration
+
+The cluster configuration block can be used to configure various parameters for Envoy clusters.
+
+| Field Name | Type | Default | Description |
+|-----------------------------------|--------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| dns-lookup-family | string | auto | This field specifies the dns-lookup-family to use for upstream requests to externalName type Kubernetes services from an HTTPProxy route. Values are: `auto`, `v4`, `v6`, `all` |
+| max-requests-per-connection | int | none | This field specifies the maximum requests for upstream connections. If not specified, there is no limit |
+| circuit-breakers | [CircuitBreakers](#circuit-breakers) | none | This field specifies the default value for [circuit-breaker-annotations](https://projectcontour.io/docs/main/config/annotations/) for services that don't specify them. |
+| per-connection-buffer-limit-bytes | int | 1MiB* | This field specifies the soft limit on size of the cluster’s new connection read and write buffer. If not specified, Envoy defaults of 1MiB apply |
+| upstream-tls | UpstreamTLS | | [Upstream TLS configuration](#upstream-tls) |
+
+_This is Envoy's default setting value and is not explicitly configured by Contour._
+
+
+
+
+### Network Configuration
+
+The network configuration block can be used to configure various parameters for network connections.
+
+| Field Name | Type | Default | Description |
+| ---------------- | ---- | ------- | ----------------------------------------------------------------------------------------------------------------------- |
+| num-trusted-hops | int | 0 | Configures the number of additional ingress proxy hops from the right side of the x-forwarded-for HTTP header to trust. |
+| admin-port | int | 9001 | Configures the Envoy Admin read-only listener on Envoy. Set to `0` to disable. |
+
+### Listener Configuration
+
+The listener configuration block can be used to configure various parameters for Envoy listeners.
+
+| Field Name | Type | Default | Description |
+|-----------------------------------|--------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| connection-balancer | string | `""` | This field specifies the listener connection balancer. If the value is `exact`, the listener will use the exact connection balancer to balance connections between threads in a single Envoy process. See [the Envoy documentation][14] for more information. |
+| max-requests-per-connection | int | none | This field specifies the maximum requests for downstream connections. If not specified, there is no limit |
+| per-connection-buffer-limit-bytes | int | 1MiB* | This field specifies the soft limit on size of the listener’s new connection read and write buffer. If not specified, Envoy defaults of 1MiB apply |
+| socket-options | SocketOptions | | The [Socket Options](#socket-options) for Envoy listeners. |
+| max-requests-per-io-cycle | int | none | Defines the limit on number of HTTP requests that Envoy will process from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is detected. Configures the `http.max_requests_per_io_cycle` Envoy runtime setting. The default value when this is not set is no limit. |
+| http2-max-concurrent-streams | int | none | Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed for a peer on a single HTTP/2 connection. It is recommended to not set this lower than 100 but this field can be used to bound resource usage by HTTP/2 connections and mitigate attacks like CVE-2023-44487. The default value when this is not set is unlimited. |
+
+_This is Envoy's default setting value and is not explicitly configured by Contour._
+
+### Server Configuration
+
+The server configuration block can be used to configure various settings for the `contour serve` command.
+
+| Field Name | Type | Default | Description |
+| --------------- | ------ | ------- | ----------------------------------------------------------------------------- |
+| xds-server-type | string | envoy | This field specifies the xDS Server to use. Options are `envoy` or `contour` (deprecated). |
+
+### Gateway Configuration
+
+The gateway configuration block is used to configure which gateway-api Gateway Contour should configure:
+
+| Field Name | Type | Default | Description |
+| -------------- | -------------- | ------- | ------------------------------------------------------------------------------ |
+| gatewayRef | NamespacedName | | [Gateway namespace and name](#gateway-ref). |
+
+### Gateway Ref
+
+| Field Name | Type | Default | Description |
+| ---------- | ------ | ------- | ----------------------------------------------------------------------------------------------- |
+| name | string | `""` | This field specifies the name of the specific Gateway to reconcile. |
+| namespace | string | `""` | This field specifies the namespace of the specific Gateway to reconcile. |
+
+### Policy Configuration
+
+The Policy configuration block can be used to configure default policy values
+that are set if not overridden by the user.
+
+The `request-headers` field is used to rewrite headers on a HTTP request, and
+the `response-headers` field is used to rewrite headers on a HTTP response.
+
+| Field Name | Type | Default | Description |
+| ---------------- | ------------ | ------- | ------------------------------------------------------------------------------------------------- |
+| request-headers | HeaderPolicy | none | The default request headers set or removed on all service routes if not overridden in the object |
+| response-headers | HeaderPolicy | none | The default response headers set or removed on all service routes if not overridden in the object |
+| applyToIngress | Boolean | false | Whether the global policy should apply to Ingress objects |
+
+#### HeaderPolicy
+
+The `set` field sets an HTTP header value, creating it if it doesn't already exist but not overwriting it if it does.
+The `remove` field removes an HTTP header.
+
+| Field Name | Type | Default | Description |
+| ---------- | ----------------- | ------- | ------------------------------------------------------------------------------- |
+| set | map[string]string | none | Map of headers to set on all service routes if not overridden in the object |
+| remove | []string | none | List of headers to remove on all service routes if not overridden in the object |
+
+Note: the values of entries in the `set` and `remove` fields can be overridden in HTTPProxy objects but it is not possible to remove these entries.
+
+### Rate Limit Service Configuration
+
+The rate limit service configuration block is used to configure an optional global rate limit service:
+
+| Field Name | Type | Default | Description |
+|-----------------------------| ------ | ------- |------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| extensionService            | string |         | This field identifies the extension service defining the rate limit service, formatted as `<namespace>/<name>`. |
+| domain | string | contour | This field defines the rate limit domain value to pass to the rate limit service. Acts as a container for a set of rate limit definitions within the RLS. |
+| failOpen | bool | false | This field defines whether to allow requests to proceed when the rate limit service fails to respond with a valid rate limit decision within the timeout defined on the extension service. |
+| enableXRateLimitHeaders | bool | false | This field defines whether to include the X-RateLimit headers X-RateLimit-Limit, X-RateLimit-Remaining, and X-RateLimit-Reset (as defined by the IETF Internet-Draft https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html), on responses to clients when the Rate Limit Service is consulted for a request. |
+| enableResourceExhaustedCode | bool | false | This field defines whether to translate status code 429 to gRPC RESOURCE_EXHAUSTED instead of UNAVAILABLE. |
+
+### Metrics Configuration
+
+MetricsParameters holds configurable parameters for Contour and Envoy metrics.
+
+| Field Name | Type | Default | Description |
+| ----------- | ----------------------- | ------- | -------------------------------------------------------------------- |
+| contour | MetricsServerParameters | | [Metrics Server Parameters](#metrics-server-parameters) for Contour. |
+| envoy | MetricsServerParameters | | [Metrics Server Parameters](#metrics-server-parameters) for Envoy. |
+
+### Metrics Server Parameters
+
+MetricsServerParameters holds configurable parameters for Contour and Envoy metrics.
+Metrics are served over HTTPS if `server-certificate-path` and `server-key-path` are set.
+Metrics and health endpoints cannot have the same port number when metrics are served over HTTPS.
+
+| Field Name | Type | Default | Description |
+| ----------------------- | ------ | ---------------------------- | -----------------------------------------------------------------------------|
+| address | string | 0.0.0.0 | Address that metrics server will bind to. |
+| port | int | 8000 (Contour), 8002 (Envoy) | Port that metrics server will bind to. |
+| server-certificate-path | string | none | Optional path to the server certificate file. |
+| server-key-path | string | none | Optional path to the server private key file. |
+| ca-certificate-path | string | none | Optional path to the CA certificate file used to verify client certificates. |
+
+### Socket Options
+
+| Field Name | Type | Default | Description |
+| --------------- | ------ | ------- | ----------------------------------------------------------------------------- |
+| tos | int | 0 | Defines the value for IPv4 TOS field (including 6 bit DSCP field) for IP packets originating from Envoy listeners. Single value is applied to all listeners. The value must be in the range 0-255, 0 means socket option is not set. If listeners are bound to IPv6-only addresses, setting this option will cause an error. |
+| traffic-class | int | 0 | Defines the value for IPv6 Traffic Class field (including 6 bit DSCP field) for IP packets originating from the Envoy listeners. Single value is applied to all listeners. The value must be in the range 0-255, 0 means socket option is not set. If listeners are bound to IPv4-only addresses, setting this option will cause an error. |
+
+
+### Circuit Breakers
+
+| Field Name | Type | Default | Description |
+| --------------- | ------ | ------- | ----------------------------------------------------------------------------- |
+| max-connections | int | 0 | The maximum number of connections that a single Envoy instance allows to the Kubernetes Service; defaults to 1024. |
+| max-pending-requests | int | 0 | The maximum number of pending requests that a single Envoy instance allows to the Kubernetes Service; defaults to 1024. |
+| max-requests | int | 0 | The maximum parallel requests a single Envoy instance allows to the Kubernetes Service; defaults to 1024 |
+| max-retries | int | 0 | The maximum number of parallel retries a single Envoy instance allows to the Kubernetes Service; defaults to 3. This setting only makes sense if the cluster is configured to do retries.|
+
+### Configuration Example
+
+The following is an example ConfigMap with configuration file included:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contour
+ namespace: projectcontour
+data:
+ contour.yaml: |
+ #
+ # server:
+ # determine which XDS Server implementation to utilize in Contour.
+ # xds-server-type: envoy
+ #
+ # specify the gateway-api Gateway Contour should configure
+ # gateway:
+ # namespace: projectcontour
+ # name: contour
+ #
+ # should contour expect to be running inside a k8s cluster
+ # incluster: true
+ #
+ # path to kubeconfig (if not running inside a k8s cluster)
+ # kubeconfig: /path/to/.kube/config
+ #
+ # Disable RFC-compliant behavior to strip "Content-Length" header if
+      # "Transfer-Encoding: chunked" is also set.
+ # disableAllowChunkedLength: false
+ # Disable HTTPProxy permitInsecure field
+ disablePermitInsecure: false
+ tls:
+ # minimum TLS version that Contour will negotiate
+ # minimum-protocol-version: "1.2"
+ # TLS ciphers to be supported by Envoy TLS listeners when negotiating
+ # TLS 1.2.
+ # cipher-suites:
+ # - '[ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305]'
+ # - '[ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305]'
+ # - 'ECDHE-ECDSA-AES256-GCM-SHA384'
+ # - 'ECDHE-RSA-AES256-GCM-SHA384'
+ # Defines the Kubernetes name/namespace matching a secret to use
+ # as the fallback certificate when requests which don't match the
+ # SNI defined for a vhost.
+ fallback-certificate:
+ # name: fallback-secret-name
+ # namespace: projectcontour
+ envoy-client-certificate:
+ # name: envoy-client-cert-secret-name
+ # namespace: projectcontour
+ ### Logging options
+ # Default setting
+ accesslog-format: envoy
+ # The default access log format is defined by Envoy but it can be customized by setting following variable.
+ # accesslog-format-string: "...\n"
+ # To enable JSON logging in Envoy
+ # accesslog-format: json
+ # accesslog-level: info
+ # The default fields that will be logged are specified below.
+ # To customise this list, just add or remove entries.
+ # The canonical list is available at
+ # https://godoc.org/github.com/projectcontour/contour/internal/envoy#JSONFields
+ # json-fields:
+ # - "@timestamp"
+ # - "authority"
+ # - "bytes_received"
+ # - "bytes_sent"
+ # - "downstream_local_address"
+ # - "downstream_remote_address"
+ # - "duration"
+ # - "method"
+ # - "path"
+ # - "protocol"
+ # - "request_id"
+ # - "requested_server_name"
+ # - "response_code"
+ # - "response_flags"
+ # - "uber_trace_id"
+ # - "upstream_cluster"
+ # - "upstream_host"
+ # - "upstream_local_address"
+ # - "upstream_service_time"
+ # - "user_agent"
+ # - "x_forwarded_for"
+ #
+ # default-http-versions:
+ # - "HTTP/2"
+ # - "HTTP/1.1"
+ #
+ # The following shows the default proxy timeout settings.
+ # timeouts:
+ # request-timeout: infinity
+ # connection-idle-timeout: 60s
+ # stream-idle-timeout: 5m
+ # max-connection-duration: infinity
+ # connection-shutdown-grace-period: 5s
+ #
+ # Envoy cluster settings.
+ # cluster:
+ # configure the cluster dns lookup family
+ # valid options are: auto (default), v4, v6, all
+ # dns-lookup-family: auto
+ # the maximum requests for upstream connections.
+ # If not specified, there is no limit.
+ # Setting this parameter to 1 will effectively disable keep alive
+ # max-requests-per-connection: 0
+ # the soft limit on size of the cluster’s new connection read and write buffers
+ # per-connection-buffer-limit-bytes: 32768
+ #
+ # network:
+ # Configure the number of additional ingress proxy hops from the
+ # right side of the x-forwarded-for HTTP header to trust.
+ # num-trusted-hops: 0
+ # Configure the port used to access the Envoy Admin interface.
+ # admin-port: 9001
+ #
+ # Configure an optional global rate limit service.
+ # rateLimitService:
+ # Identifies the extension service defining the rate limit service,
+      # formatted as <namespace>/<name>.
+ # extensionService: projectcontour/ratelimit
+ # Defines the rate limit domain to pass to the rate limit service.
+ # Acts as a container for a set of rate limit definitions within
+ # the RLS.
+ # domain: contour
+ # Defines whether to allow requests to proceed when the rate limit
+ # service fails to respond with a valid rate limit decision within
+ # the timeout defined on the extension service.
+ # failOpen: false
+ # Defines whether to include the X-RateLimit headers X-RateLimit-Limit,
+ # X-RateLimit-Remaining, and X-RateLimit-Reset (as defined by the IETF
+ # Internet-Draft linked below), on responses to clients when the Rate
+ # Limit Service is consulted for a request.
+ # ref. https://tools.ietf.org/id/draft-polli-ratelimit-headers-03.html
+ # enableXRateLimitHeaders: false
+ # Defines whether to translate status code 429 to grpc code RESOURCE_EXHAUSTED
+ # instead of the default UNAVAILABLE
+ # enableResourceExhaustedCode: false
+ #
+ # Global Policy settings.
+ # policy:
+ # # Default headers to set on all requests (unless set/removed on the HTTPProxy object itself)
+ # request-headers:
+ # set:
+ # # example: the hostname of the Envoy instance that proxied the request
+ # X-Envoy-Hostname: %HOSTNAME%
+ # # example: add a l5d-dst-override header to instruct Linkerd what service the request is destined for
+ # l5d-dst-override: %CONTOUR_SERVICE_NAME%.%CONTOUR_NAMESPACE%.svc.cluster.local:%CONTOUR_SERVICE_PORT%
+ # # default headers to set on all responses (unless set/removed on the HTTPProxy object itself)
+ # response-headers:
+ # set:
+ # # example: Envoy flags that provide additional details about the response or connection
+ # X-Envoy-Response-Flags: %RESPONSE_FLAGS%
+ # Whether or not the policy settings should apply to ingress objects
+ # applyToIngress: true
+ #
+ # metrics:
+ # contour:
+ # address: 0.0.0.0
+ # port: 8000
+ # server-certificate-path: /path/to/server-cert.pem
+ # server-key-path: /path/to/server-private-key.pem
+ # ca-certificate-path: /path/to/root-ca-for-client-validation.pem
+ # envoy:
+ # address: 0.0.0.0
+ # port: 8002
+ # server-certificate-path: /path/to/server-cert.pem
+ # server-key-path: /path/to/server-private-key.pem
+ # ca-certificate-path: /path/to/root-ca-for-client-validation.pem
+ #
+ # listener:
+ # connection-balancer: exact
+ # socket-options:
+ # tos: 64
+ # traffic-class: 64
+```
+
+_Note:_ The default example `contour` includes this [file][1] for easy deployment of Contour.
+
+## Environment Variables
+
+### CONTOUR_NAMESPACE
+
+If present, the value of the `CONTOUR_NAMESPACE` environment variable is used as:
+
+1. The value for the `contour bootstrap --namespace` flag unless otherwise specified.
+1. The value for the `contour certgen --namespace` flag unless otherwise specified.
+1. The value for the `contour serve --envoy-service-namespace` flag unless otherwise specified.
+1. The value for the `contour serve --leader-election-resource-namespace` flag unless otherwise specified.
+
+The `CONTOUR_NAMESPACE` environment variable is set via the [Downward API][6] in the Contour [example manifests][7].
+
+## Bootstrap Config File
+
+The bootstrap configuration file is generated by an initContainer in the Envoy daemonset which runs the `contour bootstrap` command to generate the file.
+This configuration file configures the Envoy container to connect to Contour and receive configuration via xDS.
+
+The next section outlines all the available flags that can be passed to the `contour bootstrap` command which are used to customize
+the configuration file to match the environment in which Envoy is deployed.
+
+### Bootstrap Flags
+
+There are flags that can be passed to `contour bootstrap` that help configure how Envoy
+connects to Contour:
+
+| Flag | Default | Description |
+| -------------------------------------- |-------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| --resources-dir | "" | Directory where resource files will be written. |
+| --admin-address | /admin/admin.sock | Path to Envoy admin unix domain socket. |
+| --admin-port (Deprecated) | 9001 | Deprecated: Port is now configured as a Contour flag. |
+| --xds-address | 127.0.0.1 | Address to connect to Contour xDS server on. |
+| --xds-port | 8001 | Port to connect to Contour xDS server on. |
+| --envoy-cafile | "" | CA filename for Envoy secure xDS gRPC communication. |
+| --envoy-cert-file | "" | Client certificate filename for Envoy secure xDS gRPC communication. |
+| --envoy-key-file | "" | Client key filename for Envoy secure xDS gRPC communication. |
+| --namespace | projectcontour | Namespace the Envoy container will run, also configured via ENV variable "CONTOUR_NAMESPACE". Namespace is used as part of the metric names on static resources defined in the bootstrap configuration file. |
+| --xds-resource-version | v3 | Currently, the only valid xDS API resource version is `v3`. |
+| --dns-lookup-family | auto | Defines what DNS Resolution Policy to use for Envoy -> Contour cluster name lookup. Either v4, v6, auto or all. |
+| --log-format | text | Log output format for Contour. Either text or json. |
+| --overload-max-heap                    | 0                 | Defines the maximum heap memory of the Envoy process controlled by the overload manager. When the value is greater than 0, the overload manager is enabled, and when Envoy reaches 95% of the maximum heap size, it performs a shrink heap operation. When it reaches 98% of the maximum heap size, Envoy will stop accepting requests. |
+
+
+[1]: {{< param github_url>}}/tree/{{< param branch >}}/examples/contour/01-contour-config.yaml
+[2]: config/access-logging
+[3]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/
+[4]: https://golang.org/pkg/time/#ParseDuration
+[5]: https://godoc.org/github.com/projectcontour/contour/internal/envoy#DefaultFields
+[6]: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/
+[7]: {{< param github_url>}}/tree/{{< param branch >}}/examples/contour
+[8]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-field-config-core-v3-httpprotocoloptions-idle-timeout
+[9]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-stream-idle-timeout
+[10]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-field-config-core-v3-httpprotocoloptions-max-connection-duration
+[11]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-drain-timeout
+[12]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-request-timeout
+[13]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#envoy-v3-api-field-extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-delayed-close-timeout
+[14]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto#config-listener-v3-listener-connectionbalanceconfig
diff --git a/site/content/docs/1.29/deploy-options.md b/site/content/docs/1.29/deploy-options.md
new file mode 100644
index 00000000000..0ae74a53bd9
--- /dev/null
+++ b/site/content/docs/1.29/deploy-options.md
@@ -0,0 +1,383 @@
+# Deployment Options
+
+The [Getting Started][8] guide shows you a simple way to get started with Contour on your cluster.
+This topic explains the details and shows you additional options.
+Most of this covers running Contour using a Kubernetes Service of `Type: LoadBalancer`.
+If you don't have a cluster with that capability see the [Running without a Kubernetes LoadBalancer][1] section.
+
+## Installation
+
+Contour requires a secret containing TLS certificates that are used to secure the gRPC communication between Contour<>Envoy.
+This secret can be auto-generated by the Contour `certgen` job or provided by an administrator.
+Traffic must be forwarded to Envoy, typically via a Service of `type: LoadBalancer`.
+All other requirements such as RBAC permissions, configuration details, are provided or have good defaults for most installations.
+
+### Setting resource requests and limits
+
+It is recommended that resource requests and limits be set on all Contour and Envoy containers.
+The example YAML manifests used in the [Getting Started][8] guide do not include these, because the appropriate values can vary widely from user to user.
+The table below summarizes the Contour and Envoy containers, and provides some reasonable resource requests to start with (note that these should be adjusted based on observed usage and expected load):
+
+| Workload | Container | Request (mem) | Request (cpu) |
+| ------------------- | ---------------- | ------------- | ------------- |
+| deployment/contour | contour | 128Mi | 250m |
+| daemonset/envoy | envoy | 256Mi | 500m |
+| daemonset/envoy | shutdown-manager | 50Mi | 25m |
+
+
+### Envoy as Daemonset
+
+The recommended installation is for Contour to run as a Deployment and Envoy to run as a Daemonset.
+The example Daemonset places a single instance of Envoy per node in the cluster and attaches to `hostPorts` on each node.
+This model allows for simple scaling of Envoy instances as well as ensuring even distribution of instances across the cluster.
+
+The [example daemonset manifest][2] or [Contour Gateway Provisioner][12] will create an installation based on these recommendations.
+
+_Note: If the size of the cluster is scaled down, connections can be lost since Kubernetes Daemonsets do not follow proper `preStop` hooks._
+
+### Envoy as Deployment
+
+An alternative Envoy deployment model is utilizing a Kubernetes Deployment with a configured `podAntiAffinity` which attempts to mirror the Daemonset deployment model.
+A benefit of this model compared to the Daemonset version is when a node is removed from the cluster, the proper shutdown events are available so connections can be cleanly drained from Envoy before terminating.
+
+The [example deployment manifest][14] will create an installation based on these recommendations.
+
+## Testing your installation
+
+### Get your hostname or IP address
+
+To retrieve the IP address or DNS name assigned to your Contour deployment, run:
+
+```bash
+$ kubectl get -n projectcontour service envoy -o wide
+```
+
+On AWS, for example, the response looks like:
+
+```
+NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+contour 10.106.53.14 a47761ccbb9ce11e7b27f023b7e83d33-2036788482.ap-southeast-2.elb.amazonaws.com 80:30274/TCP 3h app=contour
+```
+
+Depending on your cloud provider, the `EXTERNAL-IP` value is an IP address, or, in the case of Amazon AWS, the DNS name of the ELB created for Contour. Keep a record of this value.
+
+Note that if you are running an Elastic Load Balancer (ELB) on AWS, you must add more details to your configuration to get the remote address of your incoming connections.
+See the [instructions for enabling the PROXY protocol.][4]
+
+#### Minikube
+
+On Minikube, to get the IP address of the Contour service run:
+
+```bash
+$ minikube service -n projectcontour envoy --url
+```
+
+The response is always an IP address, for example `http://192.168.99.100:30588`. This is used as CONTOUR_IP in the rest of the documentation.
+
+#### kind
+
+When creating the cluster on Kind, pass a custom configuration to allow Kind to expose port 80/443 to your local host:
+
+```yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 80
+ listenAddress: "0.0.0.0"
+ - containerPort: 443
+ hostPort: 443
+ listenAddress: "0.0.0.0"
+```
+
+Then run the create cluster command passing the config file as a parameter.
+This file is in the `examples/kind` directory:
+
+```bash
+$ kind create cluster --config examples/kind/kind-expose-port.yaml
+```
+
+Then, your CONTOUR_IP (as used below) will just be `localhost:80`.
+
+_Note: We've created a public DNS record (`local.projectcontour.io`) which is configured to resolve to `127.0.0.1`. This allows you to use a real domain name in your kind cluster._
+
+### Test with Ingress
+
+The Contour repository contains an example deployment of the Kubernetes Up and Running demo application, [kuard][5].
+To test your Contour deployment, deploy `kuard` with the following command:
+
+```bash
+$ kubectl apply -f https://projectcontour.io/examples/kuard.yaml
+```
+
+Then monitor the progress of the deployment with:
+
+```bash
+$ kubectl get po,svc,ing -l app=kuard
+```
+
+You should see something like:
+
+```
+NAME READY STATUS RESTARTS AGE
+po/kuard-370091993-ps2gf 1/1 Running 0 4m
+po/kuard-370091993-r63cm 1/1 Running 0 4m
+po/kuard-370091993-t4dqk 1/1 Running 0 4m
+
+NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+svc/kuard 10.110.67.121 80/TCP 4m
+
+NAME HOSTS ADDRESS PORTS AGE
+ing/kuard * 10.0.0.47 80 4m
+```
+
+... showing that there are three Pods, one Service, and one Ingress that is bound to all virtual hosts (`*`).
+
+In your browser, navigate to the IP or DNS address of the Contour Service to interact with the demo application.
+
+### Test with HTTPProxy
+
+To test your Contour deployment with [HTTPProxy][9], run the following command:
+
+```sh
+$ kubectl apply -f https://projectcontour.io/examples/kuard-httpproxy.yaml
+```
+
+Then monitor the progress of the deployment with:
+
+```sh
+$ kubectl get po,svc,httpproxy -l app=kuard
+```
+
+You should see something like:
+
+```sh
+NAME READY STATUS RESTARTS AGE
+pod/kuard-bcc7bf7df-9hj8d 1/1 Running 0 1h
+pod/kuard-bcc7bf7df-bkbr5 1/1 Running 0 1h
+pod/kuard-bcc7bf7df-vkbtl 1/1 Running 0 1h
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/kuard ClusterIP 10.102.239.168 80/TCP 1h
+
+NAME FQDN TLS SECRET FIRST ROUTE STATUS STATUS DESCRIPT
+httpproxy.projectcontour.io/kuard kuard.local valid valid HTTPProxy
+```
+
+... showing that there are three Pods, one Service, and one HTTPProxy.
+
+In your terminal, use curl with the IP or DNS address of the Contour Service to send a request to the demo application:
+
+```sh
+$ curl -H 'Host: kuard.local' ${CONTOUR_IP}
+```
+
+## Running without a Kubernetes LoadBalancer
+
+If you can't or don't want to use a Service of `type: LoadBalancer` there are other ways to run Contour.
+
+### NodePort Service
+
+If your cluster doesn't have the capability to configure a Kubernetes LoadBalancer,
+or if you want to configure the load balancer outside Kubernetes,
+you can change the Envoy Service in the [`02-service-envoy.yaml`][7] file and set `type` to `NodePort`.
+
+This will have every node in your cluster listen on the resultant port and forward traffic to Contour.
+That port can be discovered by taking the second number listed in the `PORT` column when listing the service, for example `30274` in `80:30274/TCP`.
+
+Now you can point your browser at the specified port on any node in your cluster to communicate with Contour.
+
+### Host Networking
+
+You can run Contour without a Kubernetes Service at all.
+This is done by having the Envoy pod run with host networking.
+Contour's examples utilize this model in the `/examples` directory.
+To configure, set: `hostNetwork: true` and `dnsPolicy: ClusterFirstWithHostNet` on your Envoy pod definition.
+Next, pass `--envoy-service-http-port=80 --envoy-service-https-port=443` to the contour `serve` command which instructs Envoy to listen directly on port 80/443 on each host that it is running.
+This is best paired with a DaemonSet (perhaps paired with Node affinity) to ensure that a single instance of Contour runs on each Node.
+See the [AWS NLB tutorial][10] as an example.
+
+## Disabling Features
+
+You can run Contour with certain features disabled by passing `--disable-feature` flag to the Contour `serve` command.
+The flag is used to disable the informer for a custom resource, effectively making the corresponding CRD optional in the cluster.
+You can provide the flag multiple times.
+
+For example, to disable ExtensionService CRD, use the flag as follows: `--disable-feature=extensionservices`.
+
+See the [configuration section entry][19] for all options.
+
+## Upgrading Contour/Envoy
+
+At times, it's needed to upgrade Contour, the version of Envoy, or both.
+The included `shutdown-manager` can assist with watching Envoy for open connections while draining and give signal back to Kubernetes as to when it's fine to delete Envoy pods during this process.
+
+See the [redeploy envoy][11] docs for more information about how to not drop active connections to Envoy.
+Also see the [upgrade guides][15] on steps to roll out a new version of Contour.
+
+## Running Multiple Instances of Contour
+
+It's possible to run multiple instances of Contour within a single Kubernetes cluster.
+This can be useful for separating external vs. internal ingress, for having separate ingress controllers for different ingress classes, and more.
+Each Contour instance can also be configured via the `--watch-namespaces` flag to handle its own namespaces. This allows the Kubernetes RBAC objects
+to be restricted further.
+
+The recommended way to deploy multiple Contour instances is to put each instance in its own namespace.
+This avoids most naming conflicts that would otherwise occur, and provides better logical separation between the instances.
+However, it is also possible to deploy multiple instances in a single namespace if needed; this approach requires more modifications to the example manifests to function properly.
+Each approach is described in detail below, using the [examples/contour][17] directory's manifests for reference.
+
+### In Separate Namespaces (recommended)
+
+In general, this approach requires updating the `namespace` of all resources, as well as giving unique names to cluster-scoped resources to avoid conflicts.
+
+- `00-common.yaml`:
+ - update the name of the `Namespace`
+ - update the namespace of both `ServiceAccounts`
+- `01-contour-config.yaml`:
+ - update the namespace of the `ConfigMap`
+ - if you have any namespaced references within the ConfigMap contents (e.g. `fallback-certificate`, `envoy-client-certificate`), ensure those point to the correct namespace as well.
+- `01-crds.yaml` will be shared between the two instances; no changes are needed.
+- `02-job-certgen.yaml`:
+ - update the namespace of all resources
+ - update the namespace of the `ServiceAccount` subject within the `RoleBinding`
+- `02-role-contour.yaml`:
+ - update the name of the `ClusterRole` to be unique
+ - update the namespace of the `Role`
+- `02-rbac.yaml`:
+ - update the name of the `ClusterRoleBinding` to be unique
+ - update the namespace of the `RoleBinding`
+ - update the namespaces of the `ServiceAccount` subject within both resources
+ - update the name of the ClusterRole within the ClusterRoleBinding's roleRef to match the unique name used in `02-role-contour.yaml`
+- `02-service-contour.yaml`:
+ - update the namespace of the `Service`
+- `02-service-envoy.yaml`:
+ - update the namespace of the `Service`
+- `03-contour.yaml`:
+ - update the namespace of the `Deployment`
+  - add an argument to the container, `--ingress-class-name=<name>`, so this instance only processes Ingresses/HTTPProxies with the given ingress class.
+- `03-envoy.yaml`:
+ - update the namespace of the `DaemonSet`
+ - remove the two `hostPort` definitions from the container (otherwise, these would conflict between the two instances)
+
+
+### In The Same Namespace
+
+This approach requires giving unique names to all resources to avoid conflicts, and updating all resource references to use the correct names.
+
+- `00-common.yaml`:
+ - update the names of both `ServiceAccounts` to be unique
+- `01-contour-config.yaml`:
+ - update the name of the `ConfigMap` to be unique
+- `01-crds.yaml` will be shared between the two instances; no changes are needed.
+- `02-job-certgen.yaml`:
+ - update the names of all resources to be unique
+ - update the name of the `Role` within the `RoleBinding`'s roleRef to match the unique name used for the `Role`
+ - update the name of the `ServiceAccount` within the `RoleBinding`'s subjects to match the unique name used for the `ServiceAccount`
+ - update the serviceAccountName of the `Job`
+  - add an argument to the container, `--secrets-name-suffix=<suffix>`, so the generated TLS secrets have unique names
+ - update the spec.template.metadata.labels on the `Job` to be unique
+- `02-role-contour.yaml`:
+ - update the names of the `ClusterRole` and `Role` to be unique
+- `02-rbac.yaml`:
+ - update the names of the `ClusterRoleBinding` and `RoleBinding` to be unique
+ - update the roleRefs within both resources to reference the unique `Role` and `ClusterRole` names used in `02-role-contour.yaml`
+ - update the subjects within both resources to reference the unique `ServiceAccount` name used in `00-common.yaml`
+- `02-service-contour.yaml`:
+ - update the name of the `Service` to be unique
+ - update the selector to be unique (this must match the labels used in `03-contour.yaml`, below)
+- `02-service-envoy.yaml`:
+ - update the name of the `Service` to be unique
+ - update the selector to be unique (this must match the labels used in `03-envoy.yaml`, below)
+- `03-contour.yaml`:
+ - update the name of the `Deployment` to be unique
+ - update the metadata.labels, the spec.selector.matchLabels, the spec.template.metadata.labels, and the spec.template.spec.affinity.podAntiAffinity labels to match the labels used in `02-service-contour.yaml`
+ - update the serviceAccountName to match the unique name used in `00-common.yaml`
+  - update the `contourcert` volume to reference the unique `Secret` name generated from `02-job-certgen.yaml` (e.g. `contourcert`)
+ - update the `contour-config` volume to reference the unique `ConfigMap` name used in `01-contour-config.yaml`
+  - add an argument to the container, `--leader-election-resource-name=<name>`, so this Contour instance uses a separate leader election `Lease`
+  - add an argument to the container, `--envoy-service-name=<name>`, referencing the unique name used in `02-service-envoy.yaml`
+  - add an argument to the container, `--ingress-class-name=<name>`, so this instance only processes Ingresses/HTTPProxies with the given ingress class.
+- `03-envoy.yaml`:
+ - update the name of the `DaemonSet` to be unique
+ - update the metadata.labels, the spec.selector.matchLabels, and the spec.template.metadata.labels to match the unique labels used in `02-service-envoy.yaml`
+ - update the `--xds-address` argument to the initContainer to use the unique name of the contour Service from `02-service-contour.yaml`
+ - update the serviceAccountName to match the unique name used in `00-common.yaml`
+  - update the `envoycert` volume to reference the unique `Secret` name generated from `02-job-certgen.yaml` (e.g. `envoycert`)
+ - remove the two `hostPort` definitions from the container (otherwise, these would conflict between the two instances)
+
+### Using the Gateway provisioner
+
+The Contour Gateway provisioner also supports deploying multiple instances of Contour, either in the same namespace or different namespaces.
+See [Getting Started with the Gateway provisioner][16] for more information on getting started with the Gateway provisioner.
+To deploy multiple Contour instances, you create multiple `Gateways`, either in the same namespace or in different namespaces.
+
+Note that although the provisioning request itself is made via a Gateway API resource (`Gateway`), this method of installation still allows you to use *any* of the supported APIs for defining virtual hosts and routes: `Ingress`, `HTTPProxy`, or Gateway API's `HTTPRoute` and `TLSRoute`.
+
+If you are using `Ingress` or `HTTPProxy`, you will likely want to assign each Contour instance a different ingress class, so they each handle different subsets of `Ingress`/`HTTPProxy` resources.
+To do this, [create two separate GatewayClasses][18], each with a different `ContourDeployment` parametersRef.
+The `ContourDeployment` specs should look like:
+
+```yaml
+kind: ContourDeployment
+apiVersion: projectcontour.io/v1alpha1
+metadata:
+ namespace: projectcontour
+ name: ingress-class-1
+spec:
+ runtimeSettings:
+ ingress:
+ classNames:
+ - ingress-class-1
+---
+kind: ContourDeployment
+apiVersion: projectcontour.io/v1alpha1
+metadata:
+ namespace: projectcontour
+ name: ingress-class-2
+spec:
+ runtimeSettings:
+ ingress:
+ classNames:
+ - ingress-class-2
+```
+
+Then create each `Gateway` with the appropriate `spec.gatewayClassName`.
+
+## Running Contour in tandem with another ingress controller
+
+If you're running multiple ingress controllers, or running on a cloudprovider that natively handles ingress,
+you can specify the annotation `kubernetes.io/ingress.class: "contour"` on all ingresses that you would like Contour to claim.
+You can customize the class name with the `--ingress-class-name` flag at runtime. (A comma-separated list of class names is allowed.)
+If the `kubernetes.io/ingress.class` annotation is present with a value other than `"contour"`, Contour will ignore that ingress.
+
+## Uninstall Contour
+
+To remove Contour or the Contour Gateway Provisioner from your cluster, delete the namespace:
+
+```bash
+$ kubectl delete ns projectcontour
+```
+**Note**: Your namespace may differ from above.
+
+[1]: #running-without-a-kubernetes-loadbalancer
+[2]: {{< param github_url>}}/tree/{{< param branch >}}/examples/render/contour.yaml
+[3]: #host-networking
+[4]: guides/proxy-proto.md
+[5]: https://github.com/kubernetes-up-and-running/kuard
+[7]: {{< param github_url>}}/tree/{{< param branch >}}/examples/contour/02-service-envoy.yaml
+[8]: /getting-started
+[9]: config/fundamentals.md
+[10]: guides/deploy-aws-nlb.md
+[11]: redeploy-envoy.md
+[12]: {{< param github_url>}}/tree/{{< param branch >}}/examples/render/contour-gateway-provisioner.yaml
+[13]: https://projectcontour.io/resources/deprecation-policy/
+[14]: {{< param github_url>}}/tree/{{< param branch >}}/examples/render/contour-deployment.yaml
+[15]: /resources/upgrading/
+[16]: https://projectcontour.io/getting-started/#option-3-contour-gateway-provisioner-alpha
+[17]: {{< param github_url>}}/tree/{{< param branch >}}/examples/contour
+[18]: guides/gateway-api/#next-steps
+[19]: configuration.md
\ No newline at end of file
diff --git a/site/content/docs/1.29/github.md b/site/content/docs/1.29/github.md
new file mode 100644
index 00000000000..8a0f36b4f4d
--- /dev/null
+++ b/site/content/docs/1.29/github.md
@@ -0,0 +1,80 @@
+This document outlines how we use GitHub.
+
+## Milestones
+
+Contour attempts to ship on a quarterly basis.
+These releases are tracked with a milestone.
+The _current_ release is the milestone with the closest delivery date.
+
+Issues which are not assigned to the current milestone _should not be worked on_.
+
+## Priorities
+
+This project has three levels of priority:
+
+- p0 - Must fix immediately.
+This is reserved for bugs and security issues. A milestone cannot ship with open p0 issues.
+- p1 - Should be done.
+p1 issues assigned to a milestone _should_ be completed during that milestone.
+- p2 - May be done.
+p2 issues assigned to a milestone _may_ be completed during that milestone if time permits.
+
+Issues without a priority are _unprioritised_. Priority will be assigned by a PM or release manager during issue triage.
+
+## Questions
+
+We encourage support questions via issues.
+Questions will be tagged `question` and are not assigned a milestone or a priority.
+
+## Waiting for information
+
+Any issue which lacks sufficient information for triage will be tagged `waiting-for-info`.
+Issues with this tag may be closed after a reasonable length of time if further information is not forthcoming.
+
+## Issue tagging
+
+Issues without tags have not been triaged.
+
+During issue triage, usually by a project member, release manager, or PM, one or more tags will be assigned.
+
+- `Needs-Product` indicates the issue needs attention by a product owner or PM.
+- `Needs-design-doc` indicates the issue requires a design document to be circulated.
+
+These are blocking states: these labels must be resolved, either by PM input or by agreeing on a design.
+
+## Assigning an issue
+
+Issues within a milestone _should_ be assigned to an owner when work commences on them.
+Assigning an issue indicates that you are working on it.
+
+Before you start to work on an issue you should assign yourself.
+From that point onward you are responsible for the issue and you are expected to report timely status on the issue to anyone that asks.
+
+If you cease work on an issue, even if incomplete, you should leave a comment to that effect on the issue and remove yourself as the assignee.
+From that point onward you are no longer responsible for the issue, however you may be approached as a subject matter expert--as the last person to touch the issue--by future assignees.
+
+For infrequent contributors who are not members of the Contour project, assign yourself by leaving a comment to that effect on the issue.
+
+*Do not hoard issues, you won't enjoy it*
+
+## Requesting a review
+
+PRs which are related to issues in the current milestone should be assigned to the current milestone.
+This is an indicator to reviewers that the PR is ready for review and should be reviewed in the current milestone.
+Occasionally PRs may be assigned to the next milestone indicating they are for review at the start of the next development cycle.
+
+All PRs should reference the issue they relate to using one of the following:
+
+- `Fixes #NNNN` indicating that merging this PR will fix issue #NNNN
+- `Updates #NNNN` indicating that merging this PR will progress issue #NNNN to some degree.
+
+If there is no `Updates` or `Fixes` line in the PR the review will, with the exception of trivial or self evident fixes, be deferred.
+
+[Further reading][1]
+
+## Help wanted and good first issues
+
+The `help wanted` and `good first issue` tags _may_ be assigned to issues _in the current milestone_.
+To limit the amount of work in progress, `help wanted` and `good first issue` should not be used for issues outside the current milestone.
+
+[1]: https://dave.cheney.net/2019/02/18/talk-then-code
\ No newline at end of file
diff --git a/site/content/docs/1.29/grpc-tls-howto.md b/site/content/docs/1.29/grpc-tls-howto.md
new file mode 100644
index 00000000000..51770de950d
--- /dev/null
+++ b/site/content/docs/1.29/grpc-tls-howto.md
@@ -0,0 +1,169 @@
+# Enabling TLS between Envoy and Contour
+
+This document describes the steps required to secure communication between Envoy and Contour.
+The outcome of this is that we will have two Secrets available in the `projectcontour` namespace:
+
+- **contourcert:** contains Contour's keypair which is used for serving TLS secured gRPC, and the CA's public certificate bundle which is used for validating Envoy's client certificate.
+Contour's certificate must be a valid certificate for the name `contour` in order for this to work.
+This is currently hardcoded by Contour.
+- **envoycert:** contains Envoy's keypair which is used as a client for connecting to Contour, and the CA's public certificate bundle which is used for validating Contour's server certificate.
+
+Note that both Secrets contain a copy of the CA certificate bundle under the `ca.crt` data key.
+
+## Ways you can get the certificates into your cluster
+
+- Deploy the Job from [certgen.yaml][1].
+This will run `contour certgen --kube --secrets-format=compact` for you.
+- Run `contour certgen --kube` locally.
+- Run the manual procedure below.
+
+## Caveats and warnings
+
+**Be very careful with your production certificates!**
+
+This is intended as an example to help you get started.
+For any real deployment, you should **carefully** manage all the certificates and control who has access to them.
+Make sure you don't commit them to any git repositories either.
+
+## Manual TLS certificate generation process
+
+### Generating a CA keypair
+
+First, we need to generate a keypair:
+
+```
+$ openssl req -x509 -new -nodes \
+ -keyout certs/cakey.pem -sha256 \
+ -days 1825 -out certs/cacert.pem \
+ -subj "/O=Project Contour/CN=Contour CA"
+```
+
+Then, the new CA key will be stored in `certs/cakey.pem` and the cert in `certs/cacert.pem`.
+
+### Generating Contour's keypair
+
+Next, we need to generate a keypair for Contour.
+First, we make a new private key:
+
+```
+$ openssl genrsa -out certs/contourkey.pem 2048
+```
+
+Then, we create a CSR and have our CA sign the CSR and issue a certificate.
+This uses the file [certs/cert-contour.ext][2], which ensures that at least one of the valid names of the certificate is the bareword `contour`.
+This is required for the handshake to succeed, as `contour bootstrap` configures Envoy to pass this as the SNI server name for the connection.
+
+```
+$ openssl req -new -key certs/contourkey.pem \
+ -out certs/contour.csr \
+ -subj "/O=Project Contour/CN=contour"
+
+$ openssl x509 -req -in certs/contour.csr \
+ -CA certs/cacert.pem \
+ -CAkey certs/cakey.pem \
+ -CAcreateserial \
+ -out certs/contourcert.pem \
+ -days 1825 -sha256 \
+ -extfile certs/cert-contour.ext
+```
+
+At this point, the contour certificate and key are in the files `certs/contourcert.pem` and `certs/contourkey.pem` respectively.
+
+### Generating Envoy's keypair
+
+Next, we generate a keypair for Envoy:
+
+```
+$ openssl genrsa -out certs/envoykey.pem 2048
+```
+
+Then, we generate a CSR and have the CA sign it:
+
+```
+$ openssl req -new -key certs/envoykey.pem \
+ -out certs/envoy.csr \
+ -subj "/O=Project Contour/CN=envoy"
+
+$ openssl x509 -req -in certs/envoy.csr \
+ -CA certs/cacert.pem \
+ -CAkey certs/cakey.pem \
+ -CAcreateserial \
+ -out certs/envoycert.pem \
+ -days 1825 -sha256 \
+ -extfile certs/cert-envoy.ext
+```
+
+Like the Contour certificate, this CSR uses the file [certs/cert-envoy.ext][3].
+However, in this case, there are no special names required.
+
+### Putting the certificates in the cluster
+
+Next, we create the required Secrets in the target Kubernetes cluster:
+
+```bash
+$ kubectl create secret -n projectcontour generic contourcert \
+ --from-file=tls.key=./certs/contourkey.pem \
+ --from-file=tls.crt=./certs/contourcert.pem \
+ --from-file=ca.crt=./certs/cacert.pem \
+ --save-config
+
+$ kubectl create secret -n projectcontour generic envoycert \
+ --from-file=tls.key=./certs/envoykey.pem \
+ --from-file=tls.crt=./certs/envoycert.pem \
+ --from-file=ca.crt=./certs/cacert.pem \
+ --save-config
+```
+
+Note that we don't put the CA **key** into the cluster, there's no reason for that to be there, and that would create a security problem.
+
+## Rotating Certificates
+
+Eventually the certificates that Contour and Envoy use will need to be rotated.
+The following steps can be taken to replace the certificates that Contour and Envoy are using:
+
+1. Generate a new keypair for both Contour and Envoy (optionally also for the CA)
+2. Update the Secrets that hold the gRPC TLS keypairs
+3. Contour and Envoy will automatically rotate their certificates after mounted secrets have been updated by the kubelet
+
+The secrets can be updated in-place by running:
+
+```bash
+$ kubectl create secret -n projectcontour generic contourcert \
+ --from-file=tls.key=./certs/contourkey.pem \
+ --from-file=tls.crt=./certs/contourcert.pem \
+ --from-file=ca.crt=./certs/cacert.pem \
+ --dry-run -o json \
+ | kubectl apply -f -
+
+$ kubectl create secret -n projectcontour generic envoycert \
+ --from-file=tls.key=./certs/envoykey.pem \
+ --from-file=tls.crt=./certs/envoycert.pem \
+ --from-file=ca.crt=./certs/cacert.pem \
+ --dry-run -o json \
+ | kubectl apply -f -
+```
+
+There are a few preconditions that need to be met before Envoy can automatically reload certificate and key files:
+
+- Envoy must be version v1.14.1 or later
+- The bootstrap configuration must be generated with `contour bootstrap` using the `--resources-dir` argument, see [examples/contour/03-envoy.yaml][4]
+
+### Rotate using the contour-certgen job
+
+When using the built-in Contour certificate generation, the following steps can be used:
+
+1. Delete the contour-certgen job
+ - `kubectl delete job contour-certgen -n projectcontour`
+2. Reapply the contour-certgen job from [certgen.yaml][1]
+
+## Conclusion
+
+Once this process is done, the certificates will be present as Secrets in the `projectcontour` namespace, as required by
+[examples/contour][5].
+
+[1]: {{< param github_url >}}/tree/{{< param branch >}}/examples/contour/02-job-certgen.yaml
+[2]: {{< param github_url >}}/tree/{{< param branch >}}/certs/cert-contour.ext
+[3]: {{< param github_url >}}/tree/{{< param branch >}}/certs/cert-envoy.ext
+[4]: {{< param github_url >}}/tree/{{< param branch >}}/examples/contour/03-envoy.yaml
+[5]: {{< param github_url >}}/tree/{{< param branch >}}/examples/contour
+
diff --git a/site/content/docs/1.29/guides/_index.md b/site/content/docs/1.29/guides/_index.md
new file mode 100644
index 00000000000..8981b8fbd79
--- /dev/null
+++ b/site/content/docs/1.29/guides/_index.md
@@ -0,0 +1,9 @@
+---
+title: Guides
+description: Contour Resources
+id: guides
+---
+## Getting things done with Contour
+
+This page contains links to articles on configuring specific Contour features.
+
diff --git a/site/content/docs/1.29/guides/cert-manager.md b/site/content/docs/1.29/guides/cert-manager.md
new file mode 100644
index 00000000000..0f926946eda
--- /dev/null
+++ b/site/content/docs/1.29/guides/cert-manager.md
@@ -0,0 +1,670 @@
+---
+title: Deploying HTTPS services with Contour and cert-manager
+---
+
+This tutorial shows you how to securely deploy an HTTPS web application on a Kubernetes cluster, using:
+
+- Kubernetes
+- Contour, as the Ingress controller
+- [JetStack's cert-manager][1] to provision TLS certificates from [the Let's Encrypt project][6]
+
+## Prerequisites
+
+- A Kubernetes cluster deployed in either a data center or a cloud provider with a Kubernetes as a service offering. This tutorial was last tested on a GKE cluster running Kubernetes 1.22
+- RBAC enabled on your cluster
+- Your cluster must be able to request a public IP address from your cloud provider, using a load balancer. If you're on AWS or GKE this is automatic if you deploy a Kubernetes service object of type: LoadBalancer. If you're on your own datacenter you must set it up yourself
+- A DNS domain that you control, where you host your web application
+- Administrator permissions for all deployment steps
+
+**NOTE:** To use a local cluster like `minikube` or `kind`, see the instructions in [the deployment guide][7].
+
+## Summary
+
+This tutorial walks you through deploying:
+
+1. [Contour][0]
+2. [Jetstack cert-manager][1]
+3. A sample web application using HTTPProxy
+
+**NOTE:** If you encounter failures related to permissions, make sure the user you are operating as has administrator permissions.
+
+After you've been through the steps the first time, you don't need to repeat deploying Contour and cert-manager for subsequent application deployments. Instead, you can skip to step 3.
+
+## 1. Deploy Contour
+
+Run:
+
+```bash
+$ kubectl apply -f {{< param base_url >}}/quickstart/contour.yaml
+```
+
+to set up Contour as a deployment in its own namespace, `projectcontour`, and tell the cloud provider to provision an external IP that is forwarded to the Contour pods.
+
+Check the progress of the deployment with this command:
+
+```bash
+$ kubectl -n projectcontour get po
+NAME READY STATUS RESTARTS AGE
+contour-5475898957-jh9fm 1/1 Running 0 39s
+contour-5475898957-qlbs2 1/1 Running 0 39s
+contour-certgen-v1.19.0-5xthf 0/1 Completed 0 39s
+envoy-hqbkm 2/2 Running 0 39s
+```
+
+After all the `contour` & `envoy` pods reach `Running` status and fully `Ready`, move on to the next step.
+
+### Access your cluster
+
+Retrieve the external address of the load balancer assigned to Contour's Envoys by your cloud provider:
+
+```bash
+$ kubectl get -n projectcontour service envoy -o wide
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+envoy LoadBalancer 10.51.245.99 35.189.26.87 80:30111/TCP,443:30933/TCP 38d app=envoy
+```
+
+The value of `EXTERNAL-IP` varies by cloud provider. In this example GKE gives a bare IP address; AWS gives you a long DNS name.
+
+To make it easier to work with the external load balancer, the tutorial adds a DNS record to a domain we control that points to this load balancer's IP address:
+
+```bash
+$ host gke.davecheney.com
+gke.davecheney.com has address 35.189.26.87
+```
+
+On AWS, you specify a `CNAME`, not an `A` record, and it would look something like this:
+
+```bash
+$ host aws.davecheney.com
+aws.davecheney.com is an alias for a4d1766f6ce1611e7b27f023b7e83d33-1465548734.ap-southeast-2.elb.amazonaws.com.
+a4d1766f6ce1611e7b27f023b7e83d33-1465548734.ap-southeast-2.elb.amazonaws.com has address 52.63.20.117
+a4d1766f6ce1611e7b27f023b7e83d33-1465548734.ap-southeast-2.elb.amazonaws.com has address 52.64.233.204
+```
+
+In your own data center, you need to arrange for traffic from a public IP address to be forwarded to the cluster IP of the Contour service. This is beyond the scope of the tutorial.
+
+### Testing connectivity
+
+You must deploy at least one Ingress object before Contour can configure Envoy to serve traffic.
+Note that as a security feature, Contour does not configure Envoy to expose a port to the internet unless there's a reason it should.
+For this tutorial we deploy a version of Kenneth Reitz's [httpbin.org service][3].
+
+To deploy httpbin to your cluster, run this command:
+
+```bash
+$ kubectl apply -f {{< param base_url >}}/examples/httpbin.yaml
+```
+
+Check that the pods are running:
+
+```bash
+$ kubectl get po -l app=httpbin
+NAME READY STATUS RESTARTS AGE
+httpbin-85777b684b-8sqw5 1/1 Running 0 24s
+httpbin-85777b684b-pb26w 1/1 Running 0 24s
+httpbin-85777b684b-vpgwl 1/1 Running 0 24s
+```
+
+Then type the DNS name you set up in the previous step into a web browser, for example `http://gke.davecheney.com/`. You should see something like:
+
+![httpbin screenshot][8]
+
+You can delete the httpbin service now, or at any time, by running:
+
+```bash
+$ kubectl delete -f {{< param base_url >}}/examples/httpbin.yaml
+```
+
+## 2. Deploy jetstack/cert-manager
+
+**NOTE:** cert-manager is a powerful product that provides more functionality than this tutorial demonstrates.
+There are plenty of [other ways to deploy cert-manager][4], but they are out of scope.
+
+### Fetch the source manager deployment manifest
+
+To keep things simple, we skip cert-manager's Helm installation, and use the [supplied YAML manifests][5].
+
+```bash
+$ kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
+```
+
+When cert-manager is up and running you should see something like:
+
+```bash
+$ kubectl -n cert-manager get all
+NAME READY STATUS RESTARTS AGE
+pod/cert-manager-cainjector-74bb68d67c-8lb2f 1/1 Running 0 40s
+pod/cert-manager-f7f8bf74d-65ld9 1/1 Running 0 40s
+pod/cert-manager-webhook-645b8bdb7-2h5t6 1/1 Running 0 40s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/cert-manager ClusterIP 10.48.13.252 9402/TCP 40s
+service/cert-manager-webhook ClusterIP 10.48.7.220 443/TCP 40s
+
+NAME READY UP-TO-DATE AVAILABLE AGE
+deployment.apps/cert-manager 1/1 1 1 40s
+deployment.apps/cert-manager-cainjector 1/1 1 1 40s
+deployment.apps/cert-manager-webhook 1/1 1 1 40s
+
+NAME DESIRED CURRENT READY AGE
+replicaset.apps/cert-manager-cainjector-74bb68d67c 1 1 1 40s
+replicaset.apps/cert-manager-f7f8bf74d 1 1 1 40s
+replicaset.apps/cert-manager-webhook-645b8bdb7 1 1 1 40s
+```
+
+### Deploy the Let's Encrypt cluster issuer
+
+cert-manager supports two different CRDs for configuration, an `Issuer`, which is scoped to a single namespace,
+and a `ClusterIssuer`, which is cluster-wide.
+
+For Contour to be able to serve HTTPS traffic for an Ingress in any namespace, use `ClusterIssuer`.
+Create a file called `letsencrypt-staging.yaml` with the following contents:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-staging
+ namespace: cert-manager
+spec:
+ acme:
+ email: user@example.com
+ privateKeySecretRef:
+ name: letsencrypt-staging
+ server: https://acme-staging-v02.api.letsencrypt.org/directory
+ solvers:
+ - http01:
+ ingress:
+ class: contour
+```
+
+replacing `user@example.com` with your email address.
+This is the email address that Let's Encrypt uses to communicate with you about certificates you request.
+
+The staging Let's Encrypt server is not bound by [the API rate limits of the production server][2].
+This approach lets you set up and test your environment without worrying about rate limits.
+You can then repeat this step for a production Let's Encrypt certificate issuer.
+
+After you edit and save the file, deploy it:
+
+```bash
+$ kubectl apply -f letsencrypt-staging.yaml
+clusterissuer.cert-manager.io/letsencrypt-staging created
+```
+
+Wait for the `ClusterIssuer` to be ready:
+
+```bash
+$ kubectl get clusterissuer letsencrypt-staging
+NAME READY AGE
+letsencrypt-staging True 54s
+```
+
+## 3. Deploy your first HTTPS site using Ingress
+
+For this tutorial we deploy a version of Kenneth Reitz's [httpbin.org service][3].
+We start with the deployment.
+Copy the following to a file called `deployment.yaml`:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: httpbin
+ name: httpbin
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: httpbin
+ strategy:
+ rollingUpdate:
+ maxSurge: 1
+ maxUnavailable: 1
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ app: httpbin
+ spec:
+ containers:
+ - image: docker.io/kennethreitz/httpbin
+ name: httpbin
+ ports:
+ - containerPort: 8080
+ name: http
+ command: ["gunicorn"]
+ args: ["-b", "0.0.0.0:8080", "httpbin:app"]
+ dnsPolicy: ClusterFirst
+```
+
+Deploy to your cluster:
+
+```bash
+$ kubectl apply -f deployment.yaml
+deployment.apps/httpbin created
+$ kubectl get pod -l app=httpbin
+NAME READY STATUS RESTARTS AGE
+httpbin-67fd96d97c-8j2rr 1/1 Running 0 56m
+```
+
+Expose the deployment to the world with a Service. Create a file called `service.yaml` with
+the following contents:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: httpbin
+spec:
+ ports:
+ - port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ app: httpbin
+```
+
+and deploy:
+
+```bash
+$ kubectl apply -f service.yaml
+service/httpbin created
+$ kubectl get service httpbin
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+httpbin ClusterIP 10.48.6.155 8080/TCP 57m
+```
+
+Expose the Service to the world with Contour and an Ingress object. Create a file called `ingress.yaml` with
+the following contents:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: httpbin
+spec:
+ rules:
+ - host: httpbin.davecheney.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: httpbin
+ port:
+ number: 8080
+```
+
+The host name, `httpbin.davecheney.com` is a `CNAME` to the `gke.davecheney.com` record that was created in the first section, and must be created in the same place as the `gke.davecheney.com` record was.
+That is, in your cloud provider.
+This lets requests to `httpbin.davecheney.com` resolve to the external IP address of the Contour service.
+They are then forwarded to the Contour pods running in the cluster:
+
+```bash
+$ host httpbin.davecheney.com
+httpbin.davecheney.com is an alias for gke.davecheney.com.
+gke.davecheney.com has address 35.189.26.87
+```
+
+Change the value of `spec.rules.host` to something that you control, and deploy the Ingress to your cluster:
+
+```bash
+$ kubectl apply -f ingress.yaml
+ingress.networking.k8s.io/httpbin created
+$ kubectl get ingress httpbin
+NAME CLASS HOSTS ADDRESS PORTS AGE
+httpbin httpbin.davecheney.com 80 12s
+```
+
+Now you can type the host name of the service into a browser, or use curl, to verify it's deployed and everything is working:
+
+```bash
+$ curl http://httpbin.davecheney.com/get
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Content-Length": "0",
+    "Host": "httpbin.davecheney.com",
+ "User-Agent": "curl/7.58.0",
+ "X-Envoy-Expected-Rq-Timeout-Ms": "15000",
+ "X-Envoy-Internal": "true"
+ },
+ "origin": "10.152.0.2",
+ "url": "http://httpbin.davecheney.com/get"
+}
+```
+
+Excellent, it looks like everything is up and running serving traffic over HTTP.
+
+### Request a TLS certificate from Let's Encrypt
+
+Now it's time to use cert-manager to request a TLS certificate from Let's Encrypt.
+Do this by adding some annotations and a `tls:` section to the Ingress spec.
+
+We need to add the following annotations:
+
+- `cert-manager.io/cluster-issuer: letsencrypt-staging`: tells cert-manager to use the `letsencrypt-staging` cluster issuer you just created.
+- `kubernetes.io/tls-acme: "true"`: Tells cert-manager to do ACME TLS (what Let's Encrypt uses).
+- `ingress.kubernetes.io/force-ssl-redirect: "true"`: tells Contour to redirect HTTP requests to the HTTPS site.
+- `kubernetes.io/ingress.class: contour`: Tells Contour that it should handle this Ingress object.
+
+Using `kubectl edit ingress httpbin`:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: httpbin
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-staging
+ ingress.kubernetes.io/force-ssl-redirect: "true"
+ kubernetes.io/ingress.class: contour
+ kubernetes.io/tls-acme: "true"
+spec:
+ tls:
+ - secretName: httpbin
+ hosts:
+ - httpbin.davecheney.com
+ rules:
+ - host: httpbin.davecheney.com
+ http:
+ paths:
+ - pathType: Prefix
+ path: /
+ backend:
+ service:
+ name: httpbin
+ port:
+ number: 8080
+```
+
+The certificate is issued in the name of the hosts listed in the `tls:` section, `httpbin.davecheney.com` and stored in the secret `httpbin`.
+Behind the scenes, cert-manager creates a certificate CRD to manage the lifecycle of the certificate, and then a series of other CRDs to handle the challenge process.
+
+You can watch the progress of the certificate as it's issued:
+
+```bash
+$ kubectl describe certificate httpbin | tail -n 12
+Status:
+ Conditions:
+ Last Transition Time: 2019-11-07T00:37:55Z
+ Message: Waiting for CertificateRequest "httpbinproxy-1925286939" to complete
+ Reason: InProgress
+ Status: False
+ Type: Ready
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ Normal GeneratedKey 26s cert-manager Generated a new private key
+ Normal Requested 26s cert-manager Created new CertificateRequest resource "httpbinproxy-1925286939"
+```
+
+Wait for the certificate to be issued:
+
+```bash
+$ kubectl describe certificate httpbin | grep -C3 "Certificate is up to date"
+Status:
+ Conditions:
+ Last Transition Time: 2019-11-06T23:47:50Z
+ Message: Certificate is up to date and has not expired
+ Reason: Ready
+ Status: True
+ Type: Ready
+```
+
+A `kubernetes.io/tls` secret is created with the `secretName` specified in the `tls:` field of the Ingress.
+
+```bash
+$ kubectl get secret httpbin
+NAME TYPE DATA AGE
+httpbin kubernetes.io/tls 2 3m
+```
+
+cert-manager manages the contents of the secret as long as the Ingress is present in your cluster.
+
+You can now visit your site, replacing `http://` with `https://` — and you get a huge security warning!
+This is because the certificate was issued by the Let's Encrypt staging servers and has a fake CA.
+This is so you can't accidentally use the staging servers to serve real certificates.
+
+```bash
+$ curl https://httpbin.davecheney.com/get
+curl: (60) SSL certificate problem: unable to get local issuer certificate
+More details here: https://curl.haxx.se/docs/sslcerts.html
+
+curl failed to verify the legitimacy of the server and therefore could not
+establish a secure connection to it. To learn more about this situation and
+how to fix it, please visit the web page mentioned above.
+```
+
+### Switch to Let's Encrypt Production
+
+To request a properly signed certificate from the Let's Encrypt production servers, we create a new `ClusterIssuer`, as before but with some modifications.
+
+Create a file called `letsencrypt-prod.yaml` with the following contents:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+ name: letsencrypt-prod
+ namespace: cert-manager
+spec:
+ acme:
+ email: user@example.com
+ privateKeySecretRef:
+ name: letsencrypt-prod
+ server: https://acme-v02.api.letsencrypt.org/directory
+ solvers:
+ - http01:
+ ingress:
+ class: contour
+```
+
+again replacing `user@example.com` with your email address.
+
+Deploy:
+
+```bash
+$ kubectl apply -f letsencrypt-prod.yaml
+clusterissuer.cert-manager.io/letsencrypt-prod created
+```
+
+Now we use `kubectl edit ingress httpbin` to edit our Ingress to ask for a real certificate from `letsencrypt-prod`:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: httpbin
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt-prod
+spec:
+ ...
+```
+
+The certificate resource will transition to `Ready: False` while it's re-provisioned from the Let's Encrypt production servers, and then back to `Ready: True` once it's been provisioned:
+
+```bash
+$ kubectl describe certificate httpbin
+...
+Events:
+ Type Reason Age From Message
+ ---- ------ ---- ---- -------
+ ...
+ Normal Issuing 21s cert-manager Issuing certificate as Secret was previously issued by ClusterIssuer.cert-manager.io/letsencrypt-staging
+ Normal Reused 21s cert-manager Reusing private key stored in existing Secret resource "httpbin"
+ Normal Requested 21s cert-manager Created new CertificateRequest resource "httpbin-sjqbt"
+ Normal Issuing 18s (x2 over 48s) cert-manager The certificate has been successfully issued
+```
+
+Followed by:
+
+```bash
+$ kubectl get certificate httpbin -o wide
+NAME READY SECRET ISSUER STATUS AGE
+httpbin True httpbin letsencrypt-prod Certificate is up to date and has not expired 3m35s
+```
+
+Now revisiting our `https://httpbin.davecheney.com` site should show a valid, trusted, HTTPS certificate.
+
+```bash
+$ curl https://httpbin.davecheney.com/get
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Content-Length": "0",
+ "Host": "httpbin.davecheney.com",
+ "User-Agent": "curl/7.58.0",
+ "X-Envoy-Expected-Rq-Timeout-Ms": "15000",
+ "X-Envoy-Internal": "true"
+ },
+ "origin": "10.152.0.2",
+ "url": "https://httpbin.davecheney.com/get"
+}
+```
+
+![httpbin.davecheney.com screenshot][9]
+
+## Making cert-manager work with HTTPProxy
+
+cert-manager currently does not have a way to interact directly with HTTPProxy objects in order to respond to the HTTP01 challenge (See [#950][10] and [#951][11] for details).
+cert-manager, however, can be configured to request certificates automatically using a `Certificate` object.
+
+When cert-manager finds a `Certificate` object, it will implement the HTTP01 challenge by creating a new, temporary Ingress object that will direct requests from Let's Encrypt to temporary pods called 'solver pods'.
+These pods know how to respond to Let's Encrypt's challenge process for verifying you control the domain you're issuing certificates for.
+The Ingress resource as well as the solver pods are short lived and will only be available during the certificate request or renewal process.
+
+The result of the work steps described previously is a TLS secret, which can be referenced by an HTTPProxy.
+
+## Details
+
+To do this, we first need to create our HTTPProxy and Certificate objects.
+
+This example uses the hostname `httpbinproxy.davecheney.com`, remember to create that name before starting.
+
+Firstly, the HTTPProxy:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: httpbinproxy
+spec:
+ virtualhost:
+ fqdn: httpbinproxy.davecheney.com
+ tls:
+ secretName: httpbinproxy
+ routes:
+ - services:
+ - name: httpbin
+ port: 8080
+```
+
+This object will be marked as Invalid by Contour, since the TLS secret doesn't exist yet.
+Once that's done, create the Certificate object:
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+ name: httpbinproxy
+spec:
+ commonName: httpbinproxy.davecheney.com
+ dnsNames:
+ - httpbinproxy.davecheney.com
+ issuerRef:
+ name: letsencrypt-prod
+ kind: ClusterIssuer
+ secretName: httpbinproxy
+```
+
+Wait for the Certificate to be provisioned:
+
+```bash
+$ kubectl get certificate httpbinproxy -o wide
+NAME READY SECRET ISSUER STATUS AGE
+httpbinproxy True httpbinproxy letsencrypt-prod Certificate is up to date and has not expired 39s
+```
+
+Once cert-manager has fulfilled the HTTP01 challenge, you will have a `httpbinproxy` secret, that will contain the keypair.
+Contour will detect that the Secret exists and generate the HTTPProxy config.
+
+After that, you should be able to curl the new site:
+
+```bash
+$ curl https://httpbinproxy.davecheney.com/get
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Content-Length": "0",
+ "Host": "httpbinproxy.davecheney.com",
+ "User-Agent": "curl/7.54.0",
+ "X-Envoy-Expected-Rq-Timeout-Ms": "15000",
+ "X-Envoy-External-Address": "122.106.57.183"
+ },
+ "origin": "122.106.57.183",
+ "url": "https://httpbinproxy.davecheney.com/get"
+}
+```
+
+## Wrapping up
+
+Now that you've deployed your first HTTPS site using Contour and Let's Encrypt, deploying additional TLS enabled services is much simpler.
+Remember that for each HTTPS website you deploy, cert-manager will create a Certificate CRD that provides the domain name and the name of the target Secret.
+The TLS functionality will be enabled when the HTTPProxy contains the `tls:` stanza, and the referenced secret contains a valid keypair.
+
+See the [cert-manager docs][12] for more information.
+
+## Bonus points
+
+For bonus points, you can use a feature of Contour to automatically upgrade any HTTP request to the corresponding HTTPS site so you are no longer serving any traffic over insecure HTTP.
+
+To enable the automatic redirect from HTTP to HTTPS, add this annotation to your Ingress object.
+
+```
+metadata:
+ annotations:
+ ingress.kubernetes.io/force-ssl-redirect: "true"
+```
+Now any requests to the insecure HTTP version of your site get an unconditional 301 redirect to the HTTPS version:
+
+```
+$ curl -v http://httpbin.davecheney.com/get
+* Trying 35.189.26.87…
+* TCP_NODELAY set
+* Connected to httpbin.davecheney.com (35.189.26.87) port 80 (#0)
+> GET /get HTTP/1.1
+> Host: httpbin.davecheney.com
+> User-Agent: curl/7.58.0
+> Accept: */*
+>
+< HTTP/1.1 301 Moved Permanently
+< location: https://httpbin.davecheney.com/get
+< date: Tue, 20 Feb 2018 04:11:46 GMT
+< server: envoy
+< content-length: 0
+<
+* Connection #0 to host httpbin.davecheney.com left intact
+```
+
+__Note:__ For HTTPProxy resources this happens automatically without the need for an annotation.
+
+[0]: {{< param github_url >}}
+[1]: https://github.com/jetstack/cert-manager
+[2]: https://letsencrypt.org/docs/rate-limits/
+[3]: http://httpbin.org/
+[4]: https://docs.cert-manager.io/en/latest/getting-started/install/kubernetes.html
+[5]: https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml
+[6]: https://letsencrypt.org/getting-started/
+[7]: ../deploy-options/#get-your-hostname-or-ip-address
+[8]: /img/cert-manager/httpbinhomepage.png
+[9]: /img/cert-manager/httpbin.png
+[10]: {{< param github_url >}}/issues/950
+[11]: {{< param github_url >}}/issues/951
+[12]: https://cert-manager.io/docs/usage/ingress/
diff --git a/site/content/docs/1.29/guides/deploy-aws-nlb.md b/site/content/docs/1.29/guides/deploy-aws-nlb.md
new file mode 100644
index 00000000000..af3f8df1019
--- /dev/null
+++ b/site/content/docs/1.29/guides/deploy-aws-nlb.md
@@ -0,0 +1,47 @@
+---
+title: Deploying Contour on AWS with NLB
+---
+
+This is an advanced deployment guide to configure Contour on AWS with the [Network Load Balancer (NLB)][1].
+This configuration has several advantages:
+
+1. NLBs are often cheaper. This is especially true for development. Idle LBs do not cost money.
+2. There are no extra network hops. Traffic goes to the NLB, to the node hosting Contour, and then to the target pod.
+3. Source IP addresses are retained. Envoy (running as part of Contour) sees the native source IP address and records this with an `X-Forwarded-For` header.
+
+## Moving parts
+
+- We run Envoy as a DaemonSet across the cluster and Contour as a deployment
+- The Envoy pod runs on host ports 80 and 443 on the node
+- Host networking means that traffic hits Envoy without transitioning through any other fancy networking hops
+- Contour also binds to 8001 for Envoy->Contour config traffic.
+
+## Deploying Contour
+
+1. [Clone the Contour repository][4] and cd into the repo
+2. Edit the Envoy service (`02-service-envoy.yaml`) in the `examples/contour` directory:
+ - Remove the existing annotation: `service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp`
+ - Add the following annotation: `service.beta.kubernetes.io/aws-load-balancer-type: nlb`
+3. Run `kubectl apply -f examples/contour`
+
+This creates the `projectcontour` Namespace along with a ServiceAccount, RBAC rules, Contour Deployment and an Envoy DaemonSet.
+It also creates the NLB based loadbalancer for you.
+
+You can get the address of your NLB via:
+
+```
+$ kubectl get service envoy --namespace=projectcontour -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+## Test
+
+You can now test your NLB.
+
+1. Install a workload (see the kuard example in the [main deployment guide][2]).
+2. Look up the address for your NLB in the AWS console and enter it in your browser.
+ - Notice that Envoy fills out `X-Forwarded-For`, because it was the first to see the traffic directly from the browser.
+
+[1]: https://aws.amazon.com/blogs/aws/new-network-load-balancer-effortless-scaling-to-millions-of-requests-per-second/
+[2]: ../deploy-options/#testing-your-installation
+[3]: https://github.com/kubernetes/kubernetes/issues/52173
+[4]: {{< param github_url >}}/tree/{{< param branch >}}
diff --git a/site/content/docs/1.29/guides/deploy-aws-tls-nlb.md b/site/content/docs/1.29/guides/deploy-aws-tls-nlb.md
new file mode 100644
index 00000000000..7f4f83f685e
--- /dev/null
+++ b/site/content/docs/1.29/guides/deploy-aws-tls-nlb.md
@@ -0,0 +1,135 @@
+---
+title: AWS Network Load Balancer TLS Termination with Contour
+---
+
+## Motivation
+
+![diagram illustrating connection between network load balancer and contour](/img/aws-nlb-tls/fig.jpg){:class="img-fluid"}
+
+Managing TLS certificates (and related configuration) for production cluster workloads is both time consuming, and high risk. For example, storing multiple copies of a certificate secret key in the cluster may increase the chances of it being compromised. Additionally, TLS can be complicated to configure and implement properly.
+
+Traditionally, TLS termination at the load balancer step required using more expensive application load balancers (ALBs). AWS introduced TLS termination for network load balancers (NLBs) for enhanced security and cost effectiveness.
+
+The TLS implementation used by the AWS NLB is formally verified and maintained. Additionally, AWS Certificate Manager (ACM) is used, fully isolating your cluster from access to the private key.
+
+## Solution Overview
+
+An external client transmits a request to the NLB. The request is encrypted with TLS using the production (e.g., client facing) certificate, and on port 443.
+
+The NLB decrypts the request, and transmits it on to Envoy running in your cluster on port 8080. It follows the standard request routing configured within the cluster. Notably, the request received within the cluster includes the actual origin IP address of the external client.
+
+Alternate ports may be configured. End-to-end encryption technically requires the segment between the NLB and cluster pods be encrypted also. A follow-up post will describe the NLB originating TLS based on a cluster certificate.
+
+## Steps
+
+### Prerequisites
+
+1. Access to DNS records for domain name.
+
+[Review the docs on registering domains with AWS's Route 53.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/registrar.html)
+
+An alternate DNS provider may be used, such as Google Domains or Namecheap.
+
+Later, a subdomain (e.g., demo-service.gcline.us) will be created, pointing to the NLB. Additionally, access to the DNS records is required to generate a TLS certificate for use by the NLB.
+
+2. Verify [Contour is installed in the cluster.](https://projectcontour.io/getting-started/)
+
+3. Install [AWS Load Balancer Controller.](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/deploy/installation/)
+
+Generally, setting up the Load Balancer Controller has two steps: enabling IAM roles for service accounts, and adding the controller to the cluster. The IAM role allows the controller in the Kubernetes cluster to manage AWS resources. [Learn more about IAM roles for service accounts.](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html)
+
+### Configure
+
+1. Generate TLS Certificate
+
+Create a public TLS certificate for the domain using AWS Certificate Manager (ACM). This is streamlined when the domain is managed by Route 53. Review the [AWS Certificate Manager Docs.](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html#request-public-console)
+
+The domain name on the TLS certificate must correspond to the planned domain name for the kubernetes service. The domain name may be specified explicitly (e.g., tls-demo.gcline.us), or a wildcard certificate can be used (e.g., *.gcline.us).
+
+If the domain is registered with Route53, the TLS certificate request will automatically be approved. Otherwise, follow the ACM console instructions to create a DNS record to validate the domain.
+
+After validation, the certificate will be available for use in your AWS account.
+
+Note the ARN of the certificate, which uniquely identifies it in kubernetes config files.
+
+![screenshot indicating location of ARN value in web console](/img/aws-nlb-tls/acm-arn.png){:class="img-fluid"}
+
+2. Create Envoy Service with new NLB
+
+Contour expects a kubernetes service pointing to Envoy. Add annotations to the service to enable NLB TLS termination, before the traffic reaches Envoy. The annotations are actioned by the load balancer controller. [Review all the NLB annotations on GitHub.](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/)
+
+| annotation name | value | meaning |
+| ----- | --- | ----- |
+| service.beta.kubernetes.io/aws-load-balancer-type | external | explicitly requires an NLB, instead of an ALB |
+| service.beta.kubernetes.io/aws-load-balancer-nlb-target-type | ip | route traffic directly to the pod IP |
+| service.beta.kubernetes.io/aws-load-balancer-scheme | internet-facing | An internet-facing load balancer has a publicly resolvable DNS name |
+| service.beta.kubernetes.io/aws-load-balancer-ssl-cert | "arn:aws:acm:..." | identifies the TLS certificate used by the NLB |
+| service.beta.kubernetes.io/aws-load-balancer-ssl-ports | 443 | determines the port the NLB should listen for TLS traffic on |
+
+Example:
+
+```
+apiVersion: v1
+kind: Service
+metadata:
+ name: envoy
+ namespace: projectcontour
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: external
+ service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
+ service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
+ service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "arn:aws:acm:us-east-2:185309785115:certificate/7610ed7d-5a81-4ea2-a18a-7ba1606cca3e"
+ service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"
+spec:
+ externalTrafficPolicy: Local
+ ports:
+ - port: 443
+ targetPort: 8080
+ name: http
+ protocol: TCP
+ selector:
+ app: envoy
+ type: LoadBalancer
+```
+
+*Note:* Don't modify an existing service to add NLB TLS termination. This may result in unexpected behavior, such as duplicate NLB resources or incorrect NLB configuration.
+
+3. Configure DNS
+
+**Get domain name using kubectl.**
+
+The service name and namespace were defined above.
+
+```
+kubectl get svc envoy --namespace projectcontour
+```
+
+```
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+envoy LoadBalancer 10.100.24.154 a7ea2bbde8a164036a7e4c1ed5700cdf-154fb911d990bb1f.elb.us-east-2.amazonaws.com 443:31606/TCP 40d
+```
+
+Note the last 4 digits of the domain name for the NLB. For example, "bb1f".
+
+**Setup DNS alias for NLB**
+
+Create a DNS record pointing from a friendly name (e.g., tls-demo.gcline.us) to the NLB domain (e.g., bb1f.elb.us-east-2.amazonaws.com).
+
+For AWS's Route 53, follow the instructions below. If you use a different DNS provider, follow their instructions for [creating a CNAME record](https://docs.digitalocean.com/products/networking/dns/how-to/manage-records/#cname-records).
+
+First, create a new record in Route 53.
+
+Use the "A" record type, and enable the ["alias" option.](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-alias.html) This option attaches the DNS record to the AWS resource, without requiring an extra lookup step for clients.
+
+Select the NLB resource. Double check the region, and use the last 4 digits (noted earlier) to select the proper resource.
+
+![screenshot of Route 53 New Record Console](/img/aws-nlb-tls/record.png){:class="img-fluid"}
+
+### Verify
+
+Attempt to access the NLB domain at port 443 with HTTPS/TLS. Is the connection successful? What certificate is used? Does it reach the expected endpoint within the cluster?
+
+### Next Steps
+
+Create a second TLS certificate within the cluster, for securing connections between the NLB and pods. A guide on this topic is forthcoming.
+
diff --git a/site/content/docs/1.29/guides/external-authorization.md b/site/content/docs/1.29/guides/external-authorization.md
new file mode 100644
index 00000000000..74076ede518
--- /dev/null
+++ b/site/content/docs/1.29/guides/external-authorization.md
@@ -0,0 +1,538 @@
+---
+title: External Authorization Support
+---
+
+Starting in version 1.9, Contour supports routing client requests to an
+external authorization server. This feature can be used to centralize
+client authorization so that applications don't have to implement their
+own authorization mechanisms.
+
+## Authorization Architecture
+
+An external authorization server is a server that implements the Envoy
+external authorization [GRPC protocol][3]. Contour supports any server
+that implements this protocol.
+
+You can bind an authorization server to Contour by creating a
+[`ExtensionService`][4] resource.
+This resource tells Contour the service exists, and that it should
+program Envoy with an upstream cluster directing traffic to it.
+Note that the `ExtensionService` resource just binds the server; at this
+point Contour doesn't assume that the server is an authorization server.
+
+Once you have created `ExtensionService` resource, you can bind it to a
+particular application by referencing it in a [`HTTPProxy`][5] resource.
+In the `virtualhost` field, a new `authorization` field specifies the name
+of an `ExtensionService` to bind for the virtual host.
+When you specify a resource name here, Contour will program Envoy to
+send authorization checks to the extension service cluster before routing
+the request to the upstream application.
+
+## Authorization Request Flow
+
+It is helpful to have a mental model of how requests flow through the various
+servers involved in authorizing HTTP requests.
+The flow diagram below shows the actors that participate in the successful
+authorization of an HTTP request.
+Note that in some cases, these actors can be combined into a single
+application server.
+For example, there is no requirement for the external authorization server to
+be a separate application from the authorization provider.
+
+
+
+
+
+
+An HTTP client generates an HTTP request and sends it to
+an Envoy instance that Contour has programmed with an external
+authorization configuration.
+Envoy holds the HTTP request and sends an authorization check request
+to the Authorization server that Contour has bound to the virtual host.
+The Authorization server may be able to verify the request locally, but in
+many cases it will need to make additional requests to an Authorization
+Provider server to verify or obtain an authorization token.
+
+In this flow, the ExtAuth server is able to authorize the request, and sends an
+authorization response back to the Proxy.
+The response includes the authorization status, and a set of HTTP headers
+modifications to make to the HTTP request.
+Since this authorization was successful, the Proxy modifies the request and
+forwards it to the application.
+If the authorization was not successful, the Proxy would have immediately
+responded to the client with an HTTP error.
+
+## Using the Contour Authorization Server
+
+The Contour project has built a simple authorization server named
+[`contour-authserver`][1]. `contour-authserver` supports an authorization
+testing server, and an HTTP basic authorization server that accesses
+credentials stored in [htpasswd][2] format.
+
+To get started, ensure that Contour is deployed and that you have
+[cert-manager][6] installed in your cluster so that you can easily issue
+self-signed TLS certificates.
+
+At this point, we should also create a cluster-wide self-signed certificate
+issuer, just to make it easier to provision TLS certificates later:
+
+```bash
+$ kubectl apply -f - <` with the appropriate version of Go and BoringCrypto, see [here][10] for version specifics):
+
+```bash
+make container BUILD_CGO_ENABLED=1 BUILD_BASE_IMAGE=goboring/golang:<version> BUILD_EXTRA_GO_LDFLAGS="-linkmode=external -extldflags=-static"
+```
+
+The command above can be broken down as follows:
+- `make container` invokes the container image build target
+- `BUILD_CGO_ENABLED=1` ensures `cgo` is enabled in the Contour compilation process
+- `BUILD_BASE_IMAGE=goboring/golang:<version>` ensures we use the BoringCrypto flavor of Go
+- `BUILD_EXTRA_GO_LDFLAGS` contains the additional linker flags we need to perform a static build
+ - `-linkmode=external` tells the Go linker to use an external linker
+ - `-extldflags=-static` passes the `-static` flag to the external linker to ensure a statically linked executable is produced
+
+The container image build process should fail before export of the `contour` binary to the final image if the compiled binary is not statically linked.
+
+### Validation
+
+To be fully sure the produced `contour` binary has been compiled with BoringCrypto you must remove the `-s` flag from the base Contour `Makefile` to stop stripping symbols and run through the build process above.
+Then you will be able to inspect the `contour` binary with `go tool nm` to check for symbols containing the string `_Cfunc__goboringcrypto_`.
+Also, you can use the program [rsc.io/goversion][21]. It will report the crypto implementation used by a given binary when invoked with the `-crypto` flag.
+
+Once you have a `projectcontour/contour` image built, you can re-tag it if needed, push the image to a registry, and reference it in a Contour deployment to use it!
+
+## Building Envoy
+
+Envoy has support for building in a FIPS compliant mode as [documented here][11].
+The upstream project does not distribute a FIPS compliant Envoy container image, but combining the documented process with the processes for building the Envoy executable and container image, we can produce one.
+
+Again we will need the Envoy source code checked out to the version to build and Docker installed on your computer.
+The simplest way to build Envoy without having to learn [Bazel][12] and set up a C++ toolchain on your computer is to build using the Envoy build container image which contains the necessary tools pre-installed.
+Note that if you do build with FIPS mode outside of the build container, you can only do so on a Linux-amd64 architecture.
+
+We can first compile the Envoy binary by running the following in a `bash` shell from the Envoy source directory:
+
+```bash
+BAZEL_BUILD_EXTRA_OPTIONS="--define boringssl=fips" ENVOY_DOCKER_BUILD_DIR=<output-dir> ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release //test/exe:envoy_static_test'
+```
+
+*This command mimics the Envoy release CI process with the target `bazel.release` but differs in only running a single test for brevity. You may omit the `//test/exe:envoy_static_test` test entirely to run the full suite of Envoy tests.*
+
+Replace `<output-dir>` with a directory where you would like the build output to be placed on your host computer.
+
+Once that build completes, you should have a file named `release.tar.zst` in your specified output directory.
+This file is a [Zstandard](https://github.com/facebook/zstd) compressed archive containing the compiled Envoy release and debug binaries.
+If you would like to build an image with Envoy according to your own specifications, you can unpack the resulting archive and you will find a stripped Envoy binary in the root and an unstripped Envoy binary with debug info in the `dbg` directory.
+
+To build an image matching the canonical Envoy upstream release image ([`envoyproxy/envoy`][13]), run the following:
+
+*Note: You will need a recent version of Docker/BuildKit that supports Zstandard decompression.*
+
+```bash
+# Make ./linux/amd64 directories.
+mkdir -p ./linux/amd64
+# Copy Zstandard archive from build step.
+cp -a <output-dir>/envoy/x64/bin/release.tar.zst ./linux/amd64/release.tar.zst
+# Run the Docker image build.
+docker build -f ./ci/Dockerfile-envoy --target envoy .
+```
+
+Once you have an image built, you can tag it as needed, push the image to a registry, and use it in an Envoy deployment.
+
+## Configuring TLS Ciphers
+
+Now that we have Contour and Envoy compiled with BoringCrypto, we can turn our attention to ensuring encrypted communication paths in Contour are configured to use FIPS approved cryptographic algorithms.
+Using a FIPS flavor of Envoy will do most of the heavy lifting here without any user configuration needed.
+
+The critical communication paths and how they are set up to be FIPS compliant are enumerated below:
+- Contour -> k8s API
+ - Contour uses [`client-go`][14] to communicate with the k8s API
+ - `client-go` uses the default Golang cipher suites configuration
+ - When compiled with BoringCrypto Go, this set of ciphers is FIPS compliant and not configurable by users
+- Envoy -> Contour xDS Server, extension services, upstream services
+ - A FIPS compliant build of Envoy will choose FIPS approved TLS ciphers when negotiating TLS 1.2 as documented [here][15]
+ - The set of ciphers is not configurable
+- TLS client -> Envoy
+ - As of [Contour 1.13.0][16], the ciphers Envoy will accept as a server when negotiating TLS 1.2 are configurable
+ - The [default set of ciphers Contour configures][17] includes some ciphers that are not FIPS approved
+ - Users must configure FIPS approved ciphers from the list [here][15]
+
+[0]: https://csrc.nist.gov/publications/detail/fips/140/2/final
+[1]: https://csrc.nist.gov/projects/testing-laboratories
+[2]: https://boringssl.googlesource.com/boringssl/
+[3]: https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md
+[4]: https://go.googlesource.com/go/+/dev.boringcrypto/README.boringcrypto.md
+[5]: https://hub.docker.com/r/projectcontour/contour
+[6]: https://www.gnu.org/software/make/
+[7]: https://www.docker.com/
+[8]: {{< param github_url >}}/blob/main/Dockerfile
+[9]: https://hub.docker.com/r/goboring/golang/
+[10]: https://go.googlesource.com/go/+/dev.boringcrypto/misc/boring/README.md#version-strings
+[11]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl.html#fips-140-2
+[12]: https://bazel.build/
+[13]: https://hub.docker.com/r/envoyproxy/envoy
+[14]: https://github.com/kubernetes/client-go
+[15]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/transport_sockets/tls/v3/common.proto#envoy-v3-api-field-extensions-transport-sockets-tls-v3-tlsparameters-cipher-suites
+[16]: https://github.com/projectcontour/contour/releases/tag/v1.13.0
+[17]: https://pkg.go.dev/github.com/projectcontour/contour/pkg/config#pkg-variables
+[18]: https://pkg.go.dev/internal/goexperiment@go1.19
+[19]: https://go-boringcrypto.storage.googleapis.com/
+[20]: https://go.googlesource.com/go/+/dev.boringcrypto/misc/boring/README.md#releases
+[21]: https://godoc.org/rsc.io/goversion
\ No newline at end of file
diff --git a/site/content/docs/1.29/guides/gatekeeper.md b/site/content/docs/1.29/guides/gatekeeper.md
new file mode 100644
index 00000000000..fa6d01cab41
--- /dev/null
+++ b/site/content/docs/1.29/guides/gatekeeper.md
@@ -0,0 +1,456 @@
+---
+title: Using Gatekeeper as a validating admission controller with Contour
+---
+
+This tutorial demonstrates how to use [Gatekeeper](https://github.com/open-policy-agent/gatekeeper) as a validating admission controller for Contour.
+
+Gatekeeper is a project that enables users to define flexible policies for Kubernetes resources using [Open Policy Agent (OPA)](https://www.openpolicyagent.org/) that are enforced when those resources are created/updated via the Kubernetes API.
+
+The benefits of using Gatekeeper with Contour are:
+- Immediate feedback for the user when they try to create an `HTTPProxy` with an invalid spec. Instead of having to check the `HTTPProxy`'s status after creation for a possible error message, the create is rejected and the user is immediately provided with a reason for the rejection.
+- User-defined policies for `HTTPProxy` specs. For example, the Contour admin can define policies to enforce maximum limits on timeouts and retries, disallow certain FQDNs, etc.
+
+## Prerequisites
+
+- A Kubernetes cluster with a minimum version of 1.14 (to enable webhook timeouts for Gatekeeper).
+- Cluster-admin permissions
+
+## Deploy Contour
+
+Run:
+
+```bash
+$ kubectl apply -f {{< param base_url >}}/quickstart/contour.yaml
+```
+
+This creates a `projectcontour` namespace and sets up Contour as a deployment and Envoy as a daemonset, with communication between them secured by mutual TLS.
+
+Check the status of the Contour pods with this command:
+
+```bash
+$ kubectl -n projectcontour get pods -l app=contour
+NAME READY STATUS RESTARTS AGE
+contour-8596d6dbd7-9nrg2 1/1 Running 0 32m
+contour-8596d6dbd7-mmtc8 1/1 Running 0 32m
+```
+
+If installation was successful, all pods should reach `Running` status shortly.
+
+## Deploy Gatekeeper
+
+The following instructions are summarized from the [Gatekeeper documentation](https://github.com/open-policy-agent/gatekeeper#installation-instructions).
+If you already have Gatekeeper running in your cluster, you can skip this section.
+
+Run:
+
+```bash
+$ kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/master/deploy/gatekeeper.yaml
+```
+
+This creates a `gatekeeper-system` namespace and sets up the Gatekeeper controller manager and audit deployments using the latest Gatekeeper release.
+
+Check the status of the Gatekeeper pods with this command:
+
+```bash
+$ kubectl -n gatekeeper-system get pods
+NAME READY STATUS RESTARTS AGE
+gatekeeper-audit-67dfc46db6-kjcmc 1/1 Running 0 40m
+gatekeeper-controller-manager-7cbc758844-64hhn 1/1 Running 0 40m
+gatekeeper-controller-manager-7cbc758844-c4dkd 1/1 Running 0 40m
+gatekeeper-controller-manager-7cbc758844-xv9jn 1/1 Running 0 40m
+```
+
+If installation was successful, all pods should reach `Running` status shortly.
+
+## Configure Gatekeeper
+
+### Background
+
+Gatekeeper uses the [OPA Constraint Framework](https://github.com/open-policy-agent/frameworks/tree/master/constraint) to define and enforce policies.
+This framework has two key types: `ConstraintTemplate` and `Constraint`.
+A `ConstraintTemplate` defines a reusable OPA policy, along with the parameters that can be passed to it when it is instantiated.
+When a `ConstraintTemplate` is created, Gatekeeper automatically creates a custom resource definition (CRD) to represent it in the cluster.
+
+A `Constraint` is an instantiation of a `ConstraintTemplate`, which tells Gatekeeper to apply it to specific Kubernetes resource types (e.g. `HTTPProxy`) and provides any relevant parameter values.
+A `Constraint` is defined as an instance of the CRD representing the associated `ConstraintTemplate`.
+
+We'll now look at some examples to make these concepts concrete.
+
+### Configure resource caching
+
+First, Gatekeeper needs to be configured to store all `HTTPProxy` resources in its internal cache, so that existing `HTTPProxy` resources can be referenced within constraint template policies.
+This is essential for being able to define constraints that look across all `HTTPProxies` -- for example, to verify FQDN uniqueness.
+
+Create a file called `config.yml` containing the following YAML:
+
+```yaml
+apiVersion: config.gatekeeper.sh/v1alpha1
+kind: Config
+metadata:
+ name: config
+ namespace: "gatekeeper-system"
+spec:
+ sync:
+ syncOnly:
+ - group: "projectcontour.io"
+ version: "v1"
+ kind: "HTTPProxy"
+```
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f config.yml
+```
+
+Note that if you already had Gatekeeper running in your cluster, you may already have the `Config` resource defined.
+In that case, you'll need to edit the existing resource to add `HTTPProxy` to the `spec.sync.syncOnly` list.
+
+### Configure HTTPProxy validations
+
+The first constraint template and constraint that we'll define are what we'll refer to as a **validation**.
+These are rules for `HTTPProxy` specs that Contour universally requires to be true.
+In this example, we'll define a constraint template and constraint to enforce that all `HTTPProxies` must have a unique FQDN.
+
+Create a file called `unique-fqdn-template.yml` containing the following YAML:
+
+```yaml
+apiVersion: templates.gatekeeper.sh/v1beta1
+kind: ConstraintTemplate
+metadata:
+ name: httpproxyuniquefqdn
+spec:
+ crd:
+ spec:
+ names:
+ kind: HTTPProxyUniqueFQDN
+ listKind: HTTPProxyUniqueFQDNList
+ plural: HTTPProxyUniqueFQDNs
+ singular: HTTPProxyUniqueFQDN
+ targets:
+ - target: admission.k8s.gatekeeper.sh
+ rego: |
+ package httpproxy.uniquefqdn
+
+ violation[{"msg": msg, "other": sprintf("%v/%v", [other.metadata.namespace, other.metadata.name])}] {
+ got := input.review.object.spec.virtualhost.fqdn
+ other := data.inventory.namespace[_]["projectcontour.io/v1"]["HTTPProxy"][_]
+ other.spec.virtualhost.fqdn = got
+
+ not same(other, input.review.object)
+ msg := "HTTPProxy must have a unique spec.virtualhost.fqdn"
+ }
+
+ same(a, b) {
+ a.metadata.namespace == b.metadata.namespace
+ a.metadata.name == b.metadata.name
+ }
+```
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f unique-fqdn-template.yml
+```
+
+Within a few seconds, you'll see that a corresponding CRD has been created in the cluster:
+
+```bash
+$ kubectl get crd httpproxyuniquefqdn.constraints.gatekeeper.sh
+NAME CREATED AT
+httpproxyuniquefqdn.constraints.gatekeeper.sh 2020-08-13T16:08:57Z
+```
+
+Now, create a file called `unique-fqdn-constraint.yml` containing the following YAML:
+
+```yaml
+apiVersion: constraints.gatekeeper.sh/v1beta1
+kind: HTTPProxyUniqueFQDN
+metadata:
+ name: httpproxy-unique-fqdn
+spec:
+ match:
+ kinds:
+ - apiGroups: ["projectcontour.io"]
+ kinds: ["HTTPProxy"]
+```
+
+Note that the `Kind` of this resource corresponds to the new CRD.
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f unique-fqdn-constraint.yml
+```
+
+Now, let's create some `HTTPProxies` to see the validation in action.
+
+Create a file called `httpproxies.yml` containing the following YAML:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: demo
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: demo.projectcontour.io
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: demo2
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: demo.projectcontour.io
+```
+
+Note that both `HTTPProxies` have the same FQDN.
+
+Apply the YAML:
+
+```bash
+$ kubectl apply -f httpproxies.yml
+```
+
+You should see something like:
+```
+httpproxy.projectcontour.io/demo created
+Error from server ([denied by httpproxy-unique-fqdn] HTTPProxy must have a unique FQDN): error when creating "httpproxies.yml": admission webhook "validation.gatekeeper.sh" denied the request: [denied by httpproxy-unique-fqdn] HTTPProxy must have a unique FQDN
+```
+
+The first `HTTPProxy` was created successfully, because there was not already an existing proxy with the `demo.projectcontour.io` FQDN.
+However, when the second `HTTPProxy` was submitted, Gatekeeper rejected its creation because it used the same FQDN as the first one.
+
+### Configure HTTPProxy policies
+
+The next constraint template and constraint that we'll create are what we refer to as a **policy**.
+These are rules for `HTTPProxy` specs that an individual Contour administrator may want to enforce for their cluster, but that are not explicitly required by Contour itself.
+In this example, we'll define a constraint template and constraint to enforce that all `HTTPProxies` can be configured with at most five retries for any route.
+
+Create a file called `retry-count-range-template.yml` containing the following YAML:
+
+```yaml
+apiVersion: templates.gatekeeper.sh/v1beta1
+kind: ConstraintTemplate
+metadata:
+ name: httpproxyretrycountrange
+spec:
+ crd:
+ spec:
+ names:
+ kind: HTTPProxyRetryCountRange
+ listKind: HTTPProxyRetryCountRangeList
+ plural: HTTPProxyRetryCountRanges
+ singular: HTTPProxyRetryCountRange
+ scope: Namespaced
+ validation:
+ openAPIV3Schema:
+ properties:
+ min:
+ type: integer
+ max:
+ type: integer
+ targets:
+ - target: admission.k8s.gatekeeper.sh
+ rego: |
+ package httpproxy.retrycountrange
+
+ # build a set of all the retry count values
+ retry_counts[val] {
+ val := input.review.object.spec.routes[_].retryPolicy.count
+ }
+
+ # is there a retry count value that's greater than the allowed max?
+ violation[{"msg": msg}] {
+ retry_counts[_] > input.parameters.max
+ msg := sprintf("retry count must be less than or equal to %v", [input.parameters.max])
+ }
+
+ # is there a retry count value that's less than the allowed min?
+ violation[{"msg": msg}] {
+ retry_counts[_] < input.parameters.min
+ msg := sprintf("retry count must be greater than or equal to %v", [input.parameters.min])
+ }
+```
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f retry-count-range-template.yml
+```
+
+Again, within a few seconds, you'll see that a corresponding CRD has been created in the cluster:
+
+```bash
+$ kubectl get crd httpproxyretrycountrange.constraints.gatekeeper.sh
+NAME CREATED AT
+httpproxyretrycountrange.constraints.gatekeeper.sh 2020-08-13T16:12:10Z
+```
+
+Now, create a file called `retry-count-range-constraint.yml` containing the following YAML:
+
+```yaml
+apiVersion: constraints.gatekeeper.sh/v1beta1
+kind: HTTPProxyRetryCountRange
+metadata:
+ name: httpproxy-retry-count-range
+spec:
+ match:
+ kinds:
+ - apiGroups: ["projectcontour.io"]
+ kinds: ["HTTPProxy"]
+ namespaces:
+ - my-namespace
+ parameters:
+ max: 5
+```
+
+Note that for this `Constraint`, we've added a `spec.match.namespaces` field which defines that this policy should only be applied to `HTTPProxies` created in the `my-namespace` namespace.
+If this `namespaces` matcher is not specified, then the `Constraint` applies to all namespaces.
+You can read more about `Constraint` matchers on the [Gatekeeper website](https://github.com/open-policy-agent/gatekeeper#constraints).
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f retry-count-range-constraint.yml
+```
+
+Now, let's create some `HTTPProxies` to see the policy in action.
+
+Create a namespace called `my-namespace`:
+
+```bash
+$ kubectl create namespace my-namespace
+namespace/my-namespace created
+```
+
+Create a file called `httpproxy-retries.yml` containing the following YAML:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: demo-retries
+ namespace: my-namespace
+spec:
+ virtualhost:
+ fqdn: retries.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /foo
+ services:
+ - name: s1
+ port: 80
+ retryPolicy:
+ count: 6
+```
+
+Apply the YAML:
+
+```bash
+$ kubectl apply -f httpproxy-retries.yml
+```
+
+You should see something like:
+```
+Error from server ([denied by httpproxy-retry-count-range] retry count must be less than or equal to 5): error when creating "httpproxy-retries.yml": admission webhook "validation.gatekeeper.sh" denied the request: [denied by httpproxy-retry-count-range] retry count must be less than or equal to 5
+```
+
+Now, change the `count` field on the last line of `httpproxy-retries.yml` to have a value of `5`. Save the file, and apply it again:
+
+```bash
+$ kubectl apply -f httpproxy-retries.yml
+```
+
+Now the `HTTPProxy` creates successfully*.
+
+_* Note that the HTTPProxy is still marked invalid by Contour after creation because the service `s1` does not exist, but that's outside the scope of this guide._
+
+Finally, create a file called `httpproxy-retries-default.yml` containing the following YAML:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: demo-retries
+ namespace: default
+spec:
+ virtualhost:
+ fqdn: default.retries.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /foo
+ services:
+ - name: s1
+ port: 80
+ retryPolicy:
+ count: 6
+```
+
+Remember that our `Constraint` was defined to apply only to the `my-namespace` namespace, so it should not block the creation of this proxy, even though it has a retry policy count outside the allowed range.
+
+Apply the YAML:
+
+```bash
+$ kubectl apply -f httpproxy-retries-default.yml
+```
+
+The `HTTPProxy` creates successfully.
+
+## Gatekeeper Audit
+
+We've seen how Gatekeeper constraints can enforce constraints when a user tries to create a new `HTTPProxy`. Now let's look at how constraints can be applied to pre-existing resources in the cluster.
+
+Gatekeeper has an audit functionality, that periodically (every `60s` by default) checks all existing resources against the relevant set of constraints. Any violations are reported in the `Constraint` custom resource's `status.violations` field. This allows an administrator to periodically review & correct any pre-existing misconfigurations, while not having to worry about breaking existing resources when rolling out a new or updated constraint.
+
+To try this out, let's revisit the previous example, and change our constraint to allow a maximum retry count of four.
+
+Edit `retry-count-range-constraint.yml` and change the `max` field to have a value of `4`. Save the file.
+
+Apply it to the cluster:
+
+```bash
+$ kubectl apply -f retry-count-range-constraint.yml
+```
+
+We know that the `demo-retries` proxy has a route with a `retryPolicy.count` of `5`. This should now be invalid according to the updated constraint.
+
+Wait up to `60s` for the next periodic audit to finish, then run:
+
+```bash
+$ kubectl describe httpproxyretrycountrange httpproxy-retry-count-range
+```
+
+You should see something like:
+
+```
+...
+Status:
+ ...
+ Violations:
+ Enforcement Action: deny
+ Kind: HTTPProxy
+ Message: retry count must be less than or equal to 4
+ Name: demo-retries
+ Namespace: my-namespace
+```
+
+However, our `HTTPProxy` remains in the cluster and can continue to route requests, and the user can remediate the proxy to bring it inline with the policy on their own timeline.
+
+## Next steps
+
+Contour has a [growing library](https://github.com/projectcontour/contour/tree/main/examples/gatekeeper) of Gatekeeper constraint templates and constraints, for both **validations** and **policies**.
+
+If you're using Gatekeeper, we recommend that you apply all of the **validations** we've defined, since these rules are already being checked internally by Contour and reported as status errors/invalid proxies.
+Using the Gatekeeper constraints will only improve the user experience since users will get earlier feedback if their proxies are invalid.
+The **validations** can be found in `examples/gatekeeper/validations`.
+
+
+You should take more of a pick-and-choose approach to our sample **policies**, since every organization will have different policy needs.
+Feel free to use any/all/none of them, and augment them with your own policies if applicable.
+The sample **policies** can be found in `examples/gatekeeper/policies`.
+
+And of course, if you do develop any new constraints that you think may be useful for the broader Contour community, we welcome contributions!
diff --git a/site/content/docs/1.29/guides/gateway-api.md b/site/content/docs/1.29/guides/gateway-api.md
new file mode 100644
index 00000000000..4bcc3140c03
--- /dev/null
+++ b/site/content/docs/1.29/guides/gateway-api.md
@@ -0,0 +1,212 @@
+---
+title: Using Gateway API with Contour
+---
+
+This tutorial walks through an example of using [Gateway API][1] with Contour.
+See the [Contour reference documentation][5] for more information on Contour's Gateway API support.
+
+### Prerequisites
+The following prerequisites must be met before following this guide:
+
+- A working [Kubernetes][2] cluster. Refer to the [compatibility matrix][3] for cluster version requirements.
+- The [kubectl][4] command-line tool, installed and configured to access your cluster.
+
+## Deploy Contour with Gateway API enabled
+
+First, deploy Contour with Gateway API enabled.
+This can be done using either [static or dynamic provisioning][6].
+
+### Option #1: Statically provisioned
+
+Create Gateway API CRDs:
+```shell
+$ kubectl apply -f {{< param github_raw_url>}}/{{< param branch >}}/examples/gateway/00-crds.yaml
+```
+
+Create a GatewayClass:
+```shell
+kubectl apply -f - <<EOF
+kind: GatewayClass
+apiVersion: gateway.networking.k8s.io/v1
+metadata:
+  name: contour
+spec:
+  controllerName: projectcontour.io/gateway-controller
+EOF
+```
+
+Create a Gateway:
+
+```shell
+kubectl apply -f - <<EOF
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: projectcontour
+---
+kind: Gateway
+apiVersion: gateway.networking.k8s.io/v1
+metadata:
+  name: contour
+  namespace: projectcontour
+spec:
+  gatewayClassName: contour
+  listeners:
+    - name: http
+      protocol: HTTP
+      port: 80
+      allowedRoutes:
+        namespaces:
+          from: All
+EOF
+```
+
+Deploy Contour:
+
+```shell
+$ kubectl apply -f {{< param base_url >}}/quickstart/contour.yaml
+```
+This command creates:
+
+- Namespace `projectcontour` to run Contour
+- Contour CRDs
+- Contour RBAC resources
+- Contour Deployment / Service
+- Envoy DaemonSet / Service
+- Contour ConfigMap
+
+Update the Contour configmap to enable Gateway API processing by specifying a gateway, and restart Contour to pick up the config change:
+
+```shell
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contour
+  namespace: projectcontour
+data:
+  contour.yaml: |
+    gateway:
+      gatewayRef:
+        name: contour
+        namespace: projectcontour
+EOF
+
+kubectl -n projectcontour rollout restart deployment/contour
+```
+
+### Option #2: Dynamically provisioned
+
+Deploy the Gateway provisioner:
+
+```shell
+$ kubectl apply -f {{< param base_url >}}/quickstart/contour-gateway-provisioner.yaml
+```
+
+This command creates:
+
+- Namespace `projectcontour` to run the Gateway provisioner
+- Contour CRDs
+- Gateway API CRDs
+- Gateway provisioner RBAC resources
+- Gateway provisioner Deployment
+
+Create a GatewayClass:
+
+```shell
+kubectl apply -f - <<EOF
+kind: GatewayClass
+apiVersion: gateway.networking.k8s.io/v1
+metadata:
+  name: contour
+spec:
+  controllerName: projectcontour.io/gateway-controller
+EOF
+```
+
+Create a Gateway:
+
+```shell
+kubectl apply -f - <<EOF
+kind: Gateway
+apiVersion: gateway.networking.k8s.io/v1
+metadata:
+  name: contour
+  namespace: projectcontour
+spec:
+  gatewayClassName: contour
+  listeners:
+    - name: http
+      protocol: HTTP
+      port: 80
+      allowedRoutes:
+        namespaces:
+          from: All
+EOF
+```
+
+## Deploy a test application
+
+Deploy the test application:
+
+```shell
+$ kubectl apply -f {{< param github_raw_url>}}/{{< param branch >}}/examples/example-workload/gatewayapi/kuard/kuard.yaml
+```
+This command creates:
+
+- A Deployment named `kuard` in the default namespace to run kuard as the test application.
+- A Service named `kuard` in the default namespace to expose the kuard application on TCP port 80.
+- An HTTPRoute named `kuard` in the default namespace, attached to the `contour` Gateway, to route requests for `local.projectcontour.io` to the kuard service.
+
+Verify the kuard resources are available:
+```shell
+$ kubectl get po,svc,httproute -l app=kuard
+NAME READY STATUS RESTARTS AGE
+pod/kuard-798585497b-78x6x 1/1 Running 0 21s
+pod/kuard-798585497b-7gktg 1/1 Running 0 21s
+pod/kuard-798585497b-zw42m 1/1 Running 0 21s
+
+NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+service/kuard   ClusterIP   172.30.168.168   <none>        80/TCP    21s
+
+NAME HOSTNAMES
+httproute.gateway.networking.k8s.io/kuard ["local.projectcontour.io"]
+```
+
+## Test Routing
+
+_Note, for simplicity and compatibility across all platforms we'll use `kubectl port-forward` to get traffic to Envoy, but in a production environment you would typically use the Envoy service's address._
+
+Port-forward from your local machine to the Envoy service:
+```shell
+# If using static provisioning
+$ kubectl -n projectcontour port-forward service/envoy 8888:80
+
+# If using dynamic provisioning
+$ kubectl -n projectcontour port-forward service/envoy-contour 8888:80
+```
+
+In another terminal, make a request to the application via the forwarded port (note, `local.projectcontour.io` is a public DNS record resolving to 127.0.0.1 to make use of the forwarded port):
+```shell
+$ curl -i http://local.projectcontour.io:8888
+```
+You should receive a 200 response code along with the HTML body of the main `kuard` page.
+
+You can also open http://local.projectcontour.io:8888/ in a browser.
+
+### Further reading
+
+This guide only scratches the surface of the Gateway API's capabilities. See the [Gateway API website][1] for more information.
+
+
+[1]: https://gateway-api.sigs.k8s.io/
+[2]: https://kubernetes.io/
+[3]: https://projectcontour.io/resources/compatibility-matrix/
+[4]: https://kubernetes.io/docs/tasks/tools/install-kubectl/
+[5]: /docs/{{< param version >}}/config/gateway-api
+[6]: /docs/{{< param version >}}/config/gateway-api#enabling-gateway-api-in-contour
\ No newline at end of file
diff --git a/site/content/docs/1.29/guides/global-rate-limiting.md b/site/content/docs/1.29/guides/global-rate-limiting.md
new file mode 100644
index 00000000000..99a3c45d1bc
--- /dev/null
+++ b/site/content/docs/1.29/guides/global-rate-limiting.md
@@ -0,0 +1,503 @@
+---
+title: Global Rate Limiting
+---
+
+Starting in version 1.13, Contour supports [Envoy global rate limiting][1].
+In global rate limiting, Envoy communicates with an external Rate Limit Service (RLS) over gRPC to make rate limit decisions for each request.
+Envoy is configured to produce 1+ descriptors for incoming requests, containing things like the client IP, header values, and more.
+Envoy sends descriptors to the RLS, and the RLS returns a rate limiting decision to Envoy based on the descriptors and the RLS's configured rate limits.
+
+In this guide, we'll walk through deploying an RLS, configuring it in Contour, and configuring an `HTTPProxy` to use it for rate limiting.
+
+**NOTE: you should not consider the RLS deployment in this guide to be production-ready.**
+The instructions and example YAML below are intended to be a demonstration of functionality only.
+Each user will have their own unique production requirements for their RLS deployment.
+
+## Prerequisites
+
+This guide assumes that you have:
+
+- A local KinD cluster created using [the Contour guide][2].
+- Contour installed and running in the cluster using the [quick start][3].
+
+## Deploy an RLS
+
+For this guide, we'll deploy the [Envoy rate limit service][4] as our RLS.
+Per the project's README:
+
+> The rate limit service is a Go/gRPC service designed to enable generic rate limit scenarios from different types of applications.
+> Applications request a rate limit decision based on a domain and a set of descriptors.
+> The service reads the configuration from disk via [runtime][10], composes a cache key, and talks to the Redis cache.
+> A decision is then returned to the caller.
+
+However, any service that implements the [RateLimitService gRPC interface][5] is supported by Contour/Envoy.
+
+Create a config map with [the ratelimit service configuration][6]:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ratelimit-config
+ namespace: projectcontour
+data:
+ ratelimit-config.yaml: |
+ domain: contour
+ descriptors:
+
+ # requests with a descriptor of ["generic_key": "foo"]
+ # are limited to one per minute.
+ - key: generic_key
+ value: foo
+ rate_limit:
+ unit: minute
+ requests_per_unit: 1
+
+ # each unique remote address (i.e. client IP)
+ # is limited to three requests per minute.
+ - key: remote_address
+ rate_limit:
+ unit: minute
+ requests_per_unit: 3
+```
+
+Create a deployment for the RLS that mounts the config map as a volume.
+**This configuration is for demonstration purposes only and is not a production-ready deployment.**
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: ratelimit
+ name: ratelimit
+ namespace: projectcontour
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ratelimit
+ template:
+ metadata:
+ labels:
+ app: ratelimit
+ spec:
+ containers:
+ - name: redis
+ image: redis:alpine
+ env:
+ - name: REDIS_SOCKET_TYPE
+ value: tcp
+ - name: REDIS_URL
+ value: redis:6379
+ - name: ratelimit
+ image: docker.io/envoyproxy/ratelimit:19f2079f
+ ports:
+ - containerPort: 8080
+ name: http
+ protocol: TCP
+ - containerPort: 8081
+ name: grpc
+ protocol: TCP
+ volumeMounts:
+ - name: ratelimit-config
+ mountPath: /data/ratelimit/config
+ readOnly: true
+ env:
+ - name: USE_STATSD
+ value: "false"
+ - name: LOG_LEVEL
+ value: debug
+ - name: REDIS_SOCKET_TYPE
+ value: tcp
+ - name: REDIS_URL
+ value: localhost:6379
+ - name: RUNTIME_ROOT
+ value: /data
+ - name: RUNTIME_SUBDIRECTORY
+ value: ratelimit
+ - name: RUNTIME_WATCH_ROOT
+ value: "false"
+ # need to set RUNTIME_IGNOREDOTFILES to true to avoid issues with
+ # how Kubernetes mounts configmaps into pods.
+ - name: RUNTIME_IGNOREDOTFILES
+ value: "true"
+ command: ["/bin/ratelimit"]
+ livenessProbe:
+ httpGet:
+ path: /healthcheck
+ port: 8080
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ volumes:
+ - name: ratelimit-config
+ configMap:
+ name: ratelimit-config
+```
+
+Create a service:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+ name: ratelimit
+ namespace: projectcontour
+spec:
+ ports:
+ - port: 8081
+ name: grpc
+ protocol: TCP
+ selector:
+ app: ratelimit
+ type: ClusterIP
+```
+
+Check the progress of the deployment:
+
+```bash
+$ kubectl -n projectcontour get pods -l app=ratelimit
+NAME READY STATUS RESTARTS AGE
+ratelimit-658f4b8f6b-2hnrf 2/2 Running 0 12s
+```
+
+Once the pod is `Running` with `2/2` containers ready, move on to the next step.
+
+## Configure the RLS with Contour
+
+Create a Contour extension service for the RLS:
+
+```yaml
+apiVersion: projectcontour.io/v1alpha1
+kind: ExtensionService
+metadata:
+ namespace: projectcontour
+ name: ratelimit
+spec:
+ protocol: h2c
+ # The service name and port correspond to
+ # the service we created in the previous
+ # step.
+ services:
+ - name: ratelimit
+ port: 8081
+ timeoutPolicy:
+ response: 100ms
+```
+
+Update the Contour configmap to have the following RLS configuration:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contour
+ namespace: projectcontour
+data:
+ contour.yaml: |
+ rateLimitService:
+ # extensionService is the <namespace>/<name>
+ # of the ExtensionService we created in the
+ # previous step.
+ extensionService: projectcontour/ratelimit
+ # domain corresponds to the domain in the
+ # projectcontour/ratelimit-config config map.
+ domain: contour
+ # failOpen is whether to allow requests through
+ # if there's an error connecting to the RLS.
+ failOpen: false
+```
+
+Restart Contour to pick up the new config map:
+
+```bash
+$ kubectl -n projectcontour rollout restart deploy/contour
+deployment.apps/contour restarted
+```
+
+## Deploy a sample app
+
+To demonstrate how to use global rate limiting in a `HTTPProxy` resource, we first need to deploy a simple echo application:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ingress-conformance-echo
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: ingress-conformance-echo
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: ingress-conformance-echo
+ spec:
+ containers:
+ - name: conformance-echo
+ image: agervais/ingress-conformance-echo:latest
+ ports:
+ - name: http-api
+ containerPort: 3000
+ readinessProbe:
+ httpGet:
+ path: /health
+ port: 3000
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: ingress-conformance-echo
+spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: http-api
+ selector:
+ app.kubernetes.io/name: ingress-conformance-echo
+```
+
+This echo server will respond with a JSON object that reports information about the HTTP request it received, including the request headers.
+
+Once the application is running, we can expose it to Contour with a `HTTPProxy` resource:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: echo
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+ - conditions:
+ - prefix: /foo
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+```
+
+We can verify that the application is working by requesting any path:
+
+```bash
+$ curl -k http://local.projectcontour.io/test/$((RANDOM))
+{"TestId":"","Path":"/test/22808","Host":"local.projectcontour.io","Method":"GET","Proto":"HTTP/1.1","Headers":{"Accept":["*/*"],"Content-Length":["0"],"User-Agent":["curl/7.75.0"],"X-Envoy-Expected-Rq-Timeout-Ms":["15000"],"X-Envoy-Internal":["true"],"X-Forwarded-For":["172.18.0.1"],"X-Forwarded-Proto":["http"],"X-Request-Id":["8ecb85e1-271b-44b4-9cf0-4859cbaed7a7"],"X-Request-Start":["t=1612903866.309"]}}
+```
+
+## Add global rate limit policies
+
+Now that we have a working application exposed by a `HTTPProxy` resource, we can add global rate limiting to it.
+
+Edit the `HTTPProxy` that we created in the previous step to add rate limit policies to both routes:
+
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: echo
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+ rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - remoteAddress: {}
+ - conditions:
+ - prefix: /foo
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+ rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - remoteAddress: {}
+ - entries:
+ - genericKey:
+ value: foo
+```
+
+## Default Global rate limit policy
+
+Contour supports defining a default global rate limit policy in the `rateLimitService` configuration
+which is applied to all virtual hosts unless the host is opted-out by
+explicitly setting `disabled` to `true`. This is useful for a single-tenant
+setup use-case. This means you don't have to edit all HTTPProxy objects with the same rate limit policies, instead you can
+define the policies in the `rateLimitService` configuration like this:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contour
+ namespace: projectcontour
+data:
+ contour.yaml: |
+ rateLimitService:
+ extensionService: projectcontour/ratelimit
+ domain: contour
+ failOpen: false
+ defaultGlobalRateLimitPolicy:
+ descriptors:
+ - entries:
+ - requestHeader:
+ headerName: X-Custom-Header
+ descriptorKey: CustomHeader
+```
+
+A virtual host can opt out by setting `disabled` to `true`.
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: echo
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ rateLimitPolicy:
+ global:
+ disabled: true
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+```
+
+Also, the default global rate limit policy is not applied in case the virtual host defines its own global rate limit policy.
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: echo
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - remoteAddress: {}
+ routes:
+ - conditions:
+ - prefix: /
+ services:
+ - name: ingress-conformance-echo
+ port: 80
+```
+
+## Make requests
+
+Before making requests to our `HTTPProxy`, let's quickly revisit the `ratelimit-config` config map.
+Here's what we defined:
+
+```yaml
+...
+descriptors:
+ # requests with a descriptor of ["generic_key": "foo"]
+ # are limited to one per minute.
+ - key: generic_key
+ value: foo
+ rate_limit:
+ unit: minute
+ requests_per_unit: 1
+
+ # each unique remote address (i.e. client IP)
+ # is limited to three total requests per minute.
+ - key: remote_address
+ rate_limit:
+ unit: minute
+ requests_per_unit: 3
+```
+
+The first entry says that requests with a descriptor of `["generic_key": "foo"]` should be limited to one per minute.
+The second entry says that each unique remote address (client IP) should be allowed three total requests per minute.
+All relevant rate limits are applied for each request, and requests that result in a `429 (Too Many Requests)` count against limits.
+
+So, we should be able to make:
+- a first request to `local.projectcontour.io/foo` that gets a `200 (OK)` response
+- a second request to `local.projectcontour.io/foo` that gets a `429 (Too Many Requests)` response (due to the first rate limit)
+- a third request to `local.projectcontour.io/bar` that gets a `200 (OK)` response
+- a fourth request to `local.projectcontour.io/bar` that gets a `429 (Too Many Requests)` response (due to the second rate limit)
+
+Let's try it out (remember, you'll need to make all of these requests within 60 seconds since the rate limits are per minute):
+
+Request #1:
+```
+$ curl -I local.projectcontour.io/foo
+
+HTTP/1.1 200 OK
+content-type: application/json
+date: Mon, 08 Feb 2021 22:25:06 GMT
+content-length: 403
+x-envoy-upstream-service-time: 4
+vary: Accept-Encoding
+server: envoy
+```
+
+Request #2:
+
+```
+$ curl -I local.projectcontour.io/foo
+
+HTTP/1.1 429 Too Many Requests
+x-envoy-ratelimited: true
+date: Mon, 08 Feb 2021 22:59:10 GMT
+server: envoy
+transfer-encoding: chunked
+```
+
+Request #3:
+
+```
+$ curl -I local.projectcontour.io/bar
+
+HTTP/1.1 200 OK
+content-type: application/json
+date: Mon, 08 Feb 2021 22:59:54 GMT
+content-length: 404
+x-envoy-upstream-service-time: 2
+vary: Accept-Encoding
+server: envoy
+```
+
+Request #4:
+
+```
+$ curl -I local.projectcontour.io/bar
+
+HTTP/1.1 429 Too Many Requests
+x-envoy-ratelimited: true
+date: Mon, 08 Feb 2021 23:00:28 GMT
+server: envoy
+transfer-encoding: chunked
+```
+
+## Wrapping up
+
+For more information, see the [Contour rate limiting documentation][7] and the [API reference documentation][8].
+
+The YAML used in this guide is available [in the Contour repository][9].
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/other_features/global_rate_limiting
+[2]: ../deploy-options/#kind
+[3]: https://projectcontour.io/getting-started/#option-1-quickstart
+[4]: https://github.com/envoyproxy/ratelimit
+[5]: https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/ratelimit/v3/rls.proto
+[6]: https://github.com/envoyproxy/ratelimit#configuration
+[7]: ../config/rate-limiting/
+[8]: ../config/api/
+[9]: {{< param github_url>}}/tree/main/examples/ratelimit
+[10]: https://github.com/lyft/goruntime
diff --git a/site/content/docs/1.29/guides/grpc.md b/site/content/docs/1.29/guides/grpc.md
new file mode 100644
index 00000000000..12f0d9035fb
--- /dev/null
+++ b/site/content/docs/1.29/guides/grpc.md
@@ -0,0 +1,225 @@
+---
+title: Configuring ingress to gRPC services with Contour
+---
+
+## Example gRPC Service
+
+The below examples use the [gRPC server][1] used in Contour end to end tests.
+The server implements a service `yages.Echo` with two methods `Ping` and `Reverse`.
+It also implements the [gRPC health checking service][2] (see [here][3] for more details) and is bundled with the [gRPC health probe][4].
+
+An example base deployment and service for a gRPC server utilizing plaintext HTTP/2 are provided here:
+
+```yaml
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: grpc-echo
+ name: grpc-echo
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: grpc-echo
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: grpc-echo
+ spec:
+ containers:
+ - name: grpc-echo
+ image: ghcr.io/projectcontour/yages:v0.1.0
+ ports:
+ - name: grpc
+ containerPort: 9000
+ readinessProbe:
+ exec:
+ command: ["/grpc-health-probe", "-addr=localhost:9000"]
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: grpc-echo
+ name: grpc-echo
+spec:
+ selector:
+ app.kubernetes.io/name: grpc-echo
+ ports:
+ - port: 9000
+ protocol: TCP
+ targetPort: grpc
+```
+
+## HTTPProxy Configuration
+
+Configuring proxying to a gRPC service with HTTPProxy is as simple as specifying the protocol Envoy uses with the upstream application via the `spec.routes[].services[].protocol` field.
+For example, in the resource below, for proxying plaintext gRPC to the `yages` sample app, the protocol is set to `h2c` to denote HTTP/2 over cleartext.
+For TLS secured gRPC, the protocol used would be `h2`.
+
+Route path prefix matching can be used to match a specific gRPC message if required.
+
+```yaml
+---
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: my-grpc-service
+spec:
+ virtualhost:
+ fqdn: my-grpc-service.foo.com
+ routes:
+ - conditions:
+ - prefix: /yages.Echo/Ping # Matches a specific gRPC method.
+ services:
+ - name: grpc-echo
+ port: 9000
+ protocol: h2c
+ - conditions:
+ - prefix: / # Matches everything else.
+ services:
+ - name: grpc-echo
+ port: 9000
+ protocol: h2c
+```
+
+Using the sample deployment above along with this HTTPProxy example, you can test calling this plaintext gRPC server with the following [grpcurl][5] command:
+
+```
+grpcurl -plaintext -authority=my-grpc-service.foo.com <load balancer address>:80 yages.Echo/Ping
+```
+
+If implementing a streaming RPC, it is likely you will need to adjust per-route timeouts to ensure streams are kept alive for the appropriate durations needed.
+Relevant timeout fields to adjust include the HTTPProxy `spec.routes[].timeoutPolicy.response` field which defaults to 15s and should be increased as well as the global timeout policy configurations in the Contour configuration file `timeouts.request-timeout` and `timeouts.max-connection-duration`.
+
+## Ingress v1 Configuration
+
+To configure routing for gRPC requests with Ingress v1, you must add an annotation on the upstream Service resource as below.
+
+```yaml
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app.kubernetes.io/name: grpc-echo
+ annotations:
+ projectcontour.io/upstream-protocol.h2c: "9000"
+ name: grpc-echo
+spec:
+ selector:
+ app.kubernetes.io/name: grpc-echo
+ ports:
+ - port: 9000
+ protocol: TCP
+ targetPort: grpc
+```
+
+The annotation key must follow the form `projectcontour.io/upstream-protocol.{protocol}` where `{protocol}` is `h2c` for plaintext gRPC or `h2` for TLS encrypted gRPC to the upstream application.
+The annotation value contains a comma-separated list of port names and/or numbers that must match with the ones defined in the Service definition.
+
+Using the Service above with the Ingress resource below should achieve the same configuration as with an HTTPProxy.
+
+```yaml
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: my-grpc-service
+spec:
+ rules:
+ - host: my-grpc-service.foo.com
+ http:
+ paths:
+ - path: /
+ backend:
+ service:
+ name: grpc-echo
+ port:
+ number: 9000
+ pathType: Prefix
+```
+
+## Gateway API Configuration
+
+Gateway API now supports a specific resource [GRPCRoute][6] for routing gRPC requests.
+
+Configuring GRPCRoute for routing gRPC requests needs to specify parentRefs, hostnames, and routing rules with specific backendRefs. In the below example, route path matching is conducted via method matching rule for declared services and their methods.
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1alpha2
+kind: GRPCRoute
+metadata:
+ name: yages
+spec:
+ parentRefs:
+ - namespace: projectcontour
+ name: contour
+ hostnames:
+ - my-grpc-service.foo.com
+ rules:
+ - matches:
+ - method:
+ service: yages.Echo
+ method: Ping
+ - method:
+ service: grpc.reflection.v1alpha.ServerReflection
+ method: ServerReflectionInfo
+ backendRefs:
+ - name: grpc-echo
+ port: 9000
+```
+Using the sample deployment above along with this GRPCRoute example, you can test calling this plaintext gRPC server with the same grpcurl command:
+
+```shell
+grpcurl -plaintext -authority=my-grpc-service.foo.com <load balancer address>:80 yages.Echo/Ping
+```
+Note that the second match, on the `grpc.reflection.v1alpha.ServerReflection` service, is required by the `grpcurl` command.
+
+When using GRPCRoute, users should annotate their Service similarly to when using Ingress configuration, to indicate the protocol to use when connecting to the backend Service, i.e. `h2c` for HTTP plaintext and `h2` for TLS encrypted HTTPS. If it's not specified, Contour will infer the protocol based on the Gateway Listener protocol: `h2c` for HTTP and `h2` for HTTPS.
+
+
+
+
+## gRPC-Web
+
+Contour configures Envoy to automatically convert [gRPC-Web][7] HTTP/1 requests to gRPC over HTTP/2 RPC calls to an upstream service.
+This is a convenience addition to make usage of gRPC web application client libraries and the like easier.
+
+Note that you still must provide configuration of the upstream protocol to have gRPC-Web requests converted to gRPC to the upstream app.
+If your upstream application does not in fact support gRPC, you may get a protocol error.
+In that case, please see [this issue][8].
+
+For example, with the example deployment and routing configuration provided above, an example HTTP/1.1 request and response via `curl` looks like:
+
+```
+curl \
+ -s -v \
+  <load balancer address>:80/yages.Echo/Ping \
+ -XPOST \
+ -H 'Host: my-grpc-service.foo.com' \
+ -H 'Content-Type: application/grpc-web-text' \
+ -H 'Accept: application/grpc-web-text' \
+ -d'AAAAAAA='
+```
+
+This `curl` command sends and receives gRPC messages as base 64 encoded text over HTTP/1.1.
+Piping the output to `base64 -d | od -c` we can see the raw text gRPC response:
+
+```
+0000000 \0 \0 \0 \0 006 \n 004 p o n g 200 \0 \0 \0 036
+0000020 g r p c - s t a t u s : 0 \r \n g
+0000040 r p c - m e s s a g e : \r \n
+0000056
+```
+
+[1]: https://github.com/projectcontour/yages
+[2]: https://pkg.go.dev/google.golang.org/grpc/health/grpc_health_v1
+[3]: https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+[4]: https://github.com/grpc-ecosystem/grpc-health-probe
+[5]: https://github.com/fullstorydev/grpcurl
+[6]: https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute
+[7]: https://github.com/grpc/grpc-web
+[8]: https://github.com/projectcontour/contour/issues/4290
diff --git a/site/content/docs/1.29/guides/health-checking.md b/site/content/docs/1.29/guides/health-checking.md
new file mode 100644
index 00000000000..8e7bcdb5bb5
--- /dev/null
+++ b/site/content/docs/1.29/guides/health-checking.md
@@ -0,0 +1,11 @@
+---
+title: Health Checking
+---
+
+Contour exposes two health endpoints `/health` and `/healthz`. By default these paths are serviced by `0.0.0.0:8000` and are configurable using the `--health-address` and `--health-port` flags.
+
+e.g. `--health-port 9999` would create a health listener of `0.0.0.0:9999`
+
+**Note:** the `Service` deployment manifest when installing Contour must be updated to represent the same port as the above configured flags.
+
+The health endpoints perform a connection to the Kubernetes cluster's API.
diff --git a/site/content/docs/1.29/guides/kind.md b/site/content/docs/1.29/guides/kind.md
new file mode 100644
index 00000000000..dcc374b70af
--- /dev/null
+++ b/site/content/docs/1.29/guides/kind.md
@@ -0,0 +1,63 @@
+---
+title: Creating a Contour-compatible kind cluster
+---
+
+This guide walks through creating a kind (Kubernetes in Docker) cluster on your local machine that can be used for developing and testing Contour.
+
+# Prerequisites
+
+Download & install Docker and kind:
+
+- Docker [installation information](https://docs.docker.com/desktop/#download-and-install)
+- kind [download and install instructions](https://kind.sigs.k8s.io/docs/user/quick-start/)
+
+# Kind configuration file
+
+Create a kind configuration file locally.
+This file will instruct kind to create a cluster with one control plane node and one worker node, and to map ports 80 and 443 on your local machine to ports 80 and 443 on the worker node container.
+This will allow us to easily get traffic to Contour/Envoy running inside the kind cluster from our local machine.
+
+Copy the text below into the local yaml file `kind-config.yaml`:
+
+```yaml
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+- role: worker
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 80
+ listenAddress: "0.0.0.0"
+ - containerPort: 443
+ hostPort: 443
+ listenAddress: "0.0.0.0"
+```
+
+# Kubernetes cluster using kind
+
+Create a kind cluster using the config file from above:
+
+```sh
+$ kind create cluster --config kind-config.yaml
+```
+
+Verify the nodes are ready by running:
+
+```sh
+$ kubectl get nodes
+```
+
+You should see 2 nodes listed with status **Ready**:
+- kind-control-plane
+- kind-worker
+
+Congratulations, you have created your cluster environment. You're ready to install Contour.
+
+_Note:_ When you are done with the cluster, you can delete it by running:
+```sh
+$ kind delete cluster
+```
+
+# Next Steps
+See https://projectcontour.io/getting-started/ for how to install Contour into your kind cluster.
diff --git a/site/content/docs/1.29/guides/metrics/table.md b/site/content/docs/1.29/guides/metrics/table.md
new file mode 100644
index 00000000000..89405d815c6
--- /dev/null
+++ b/site/content/docs/1.29/guides/metrics/table.md
@@ -0,0 +1,20 @@
+| Name | Type | Labels | Description |
+| ---- | ---- | ------ | ----------- |
+| contour_build_info | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | branch, revision, version | Build information for Contour. Labels include the branch and git SHA that Contour was built from, and the Contour version. |
+| contour_cachehandler_onupdate_duration_seconds | [SUMMARY](https://prometheus.io/docs/concepts/metric_types/#summary) | | Histogram for the runtime of xDS cache regeneration. |
+| contour_dag_cache_object | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | kind | Total number of items that are currently in the DAG cache. |
+| contour_dagrebuild_seconds | [SUMMARY](https://prometheus.io/docs/concepts/metric_types/#summary) | | Duration in seconds of DAG rebuilds |
+| contour_dagrebuild_timestamp | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | | Timestamp of the last DAG rebuild. |
+| contour_dagrebuild_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | | Total number of times DAG has been rebuilt since startup |
+| contour_eventhandler_operation_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind, op | Total number of Kubernetes object changes Contour has received by operation and object kind. |
+| contour_httpproxy | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | namespace | Total number of HTTPProxies that exist regardless of status. |
+| contour_httpproxy_invalid | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | namespace, vhost | Total number of invalid HTTPProxies. |
+| contour_httpproxy_orphaned | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | namespace | Total number of orphaned HTTPProxies which have no root delegating to them. |
+| contour_httpproxy_root | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | namespace | Total number of root HTTPProxies. Note there will only be a single root HTTPProxy per vhost. |
+| contour_httpproxy_valid | [GAUGE](https://prometheus.io/docs/concepts/metric_types/#gauge) | namespace, vhost | Total number of valid HTTPProxies. |
+| contour_status_update_conflict_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind | Number of status update conflicts encountered by object kind. |
+| contour_status_update_duration_seconds | [SUMMARY](https://prometheus.io/docs/concepts/metric_types/#summary) | error, kind | How long a status update takes to finish. |
+| contour_status_update_failed_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind | Number of status updates that failed by object kind. |
+| contour_status_update_noop_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind | Number of status updates that are no-ops by object kind. This is a subset of successful status updates. |
+| contour_status_update_success_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind | Number of status updates that succeeded by object kind. |
+| contour_status_update_total | [COUNTER](https://prometheus.io/docs/concepts/metric_types/#counter) | kind | Total number of status updates by object kind. |
diff --git a/site/content/docs/1.29/guides/prometheus.md b/site/content/docs/1.29/guides/prometheus.md
new file mode 100644
index 00000000000..f0b7364c340
--- /dev/null
+++ b/site/content/docs/1.29/guides/prometheus.md
@@ -0,0 +1,94 @@
+---
+title: Collecting Metrics with Prometheus
+---
+
+
+
+Contour and Envoy expose metrics that can be scraped with Prometheus. By
+default, annotations to gather them are in all the `deployment` yamls and they
+should work out of the box with most configurations.
+
+## Envoy Metrics
+
+Envoy typically [exposes metrics](https://www.envoyproxy.io/docs/envoy/v1.15.0/configuration/http/http_conn_man/stats#config-http-conn-man-stats) through an endpoint on its admin interface. To
+avoid exposing the entire admin interface to Prometheus (and other workloads in
+the cluster), Contour configures a static listener that sends traffic to the
+stats endpoint and nowhere else.
+
+Envoy supports Prometheus-compatible `/stats/prometheus` endpoint for metrics on
+port `8002`.
+
+## Contour Metrics
+
+Contour exposes a Prometheus-compatible `/metrics` endpoint that defaults to listening on port 8000. This can be configured by using the `--http-address` and `--http-port` flags for the `serve` command.
+
+**Note:** the `Service` deployment manifest when installing Contour must be updated to represent the same port as the configured flag.
+
+**The metrics endpoint exposes the following metrics:**
+
+{{% metrics-table %}}
+
+## Sample Deployment
+
+In the `/examples` directory there are example deployment files that can be used to spin up an example environment.
+All deployments there are configured with annotations for prometheus to scrape by default, so it should be possible to utilize any of them with the following quickstart example instructions.
+
+### Deploy Prometheus
+
+A sample deployment of Prometheus and Alertmanager is provided that uses temporary storage. This deployment can be used for testing and development, but might not be suitable for all environments.
+
+#### Stateful Deployment
+
+ A stateful deployment of Prometheus should use persistent storage with [Persistent Volumes and Persistent Volume Claims][1] to maintain a correlation between a data volume and the Prometheus Pod.
+ Persistent volumes can be static or dynamic and depend on the backend storage implementation utilized in the environment in which the cluster is deployed. For more information, see the [Kubernetes documentation on types of persistent volumes][2].
+
+#### Quick start
+
+```sh
+# Deploy
+$ kubectl apply -f examples/prometheus
+```
+
+#### Access the Prometheus web UI
+
+```sh
+$ kubectl -n projectcontour-monitoring port-forward $(kubectl -n projectcontour-monitoring get pods -l app=prometheus -l component=server -o jsonpath='{.items[0].metadata.name}') 9090:9090
+```
+
+then go to `http://localhost:9090` in your browser.
+
+#### Access the Alertmanager web UI
+
+```sh
+$ kubectl -n projectcontour-monitoring port-forward $(kubectl -n projectcontour-monitoring get pods -l app=prometheus -l component=alertmanager -o jsonpath='{.items[0].metadata.name}') 9093:9093
+```
+
+then go to `http://localhost:9093` in your browser.
+
+### Deploy Grafana
+
+A sample deployment of Grafana is provided that uses temporary storage.
+
+#### Quick start
+
+```sh
+# Deploy
+$ kubectl apply -f examples/grafana/
+
+# Create secret with grafana credentials
+$ kubectl create secret generic grafana -n projectcontour-monitoring \
+ --from-literal=grafana-admin-password=admin \
+ --from-literal=grafana-admin-user=admin
+```
+
+#### Access the Grafana UI
+
+```sh
+$ kubectl port-forward $(kubectl get pods -l app=grafana -n projectcontour-monitoring -o jsonpath='{.items[0].metadata.name}') 3000 -n projectcontour-monitoring
+```
+
+then go to `http://localhost:3000` in your browser.
+The username and password are from when you defined the Grafana secret in the previous step.
+
+[1]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
+[2]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes
\ No newline at end of file
diff --git a/site/content/docs/1.29/guides/proxy-proto.md b/site/content/docs/1.29/guides/proxy-proto.md
new file mode 100644
index 00000000000..7753d8c5776
--- /dev/null
+++ b/site/content/docs/1.29/guides/proxy-proto.md
@@ -0,0 +1,53 @@
+---
+title: How to Configure PROXY v1/v2 Support
+---
+
+If you deploy Contour as a Deployment or Daemonset, you will likely use a `type: LoadBalancer` Service to request an [external load balancer][1] from your hosting provider.
+If you use the Elastic Load Balancer (ELB) service from Amazon's EC2, you need to perform a couple of additional steps to enable the [PROXY][0] protocol. Here's why:
+
+External load balancers typically operate in one of two modes: a layer 7 HTTP proxy, or a layer 4 TCP proxy.
+The former cannot be used to load balance TLS traffic, because your cloud provider attempts HTTP negotiation on port 443.
+So the latter must be used when Contour handles HTTP and HTTPS traffic.
+
+However this leads to a situation where the remote IP address of the client is reported as the inside address of your cloud provider's load balancer.
+To rectify the situation, you can add annotations to your service and flags to your Contour Deployment or DaemonSet to enable the [PROXY][0] protocol which forwards the original client IP details to Envoy.
+
+## Enable PROXY protocol on your service in GKE
+
+In GKE clusters a `type: LoadBalancer` Service is provisioned as a Network Load Balancer and will forward traffic to your Envoy instances with their client addresses intact.
+Your services should see the addresses in the `X-Forwarded-For` or `X-Envoy-External-Address` headers without having to enable a PROXY protocol.
+
+## Enable PROXY protocol on your service in AWS
+
+To instruct EC2 to place the ELB into `tcp`+`PROXY` mode, add the following annotations to the `contour` Service:
+
+```
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
+ service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*'
+ name: contour
+ namespace: projectcontour
+spec:
+ type: LoadBalancer
+...
+```
+
+## Enable PROXY protocol support for all Envoy listening ports
+
+```
+...
+spec:
+ containers:
+ - image: ghcr.io/projectcontour/contour:
+ imagePullPolicy: Always
+ name: contour
+ command: ["contour"]
+ args: ["serve", "--incluster", "--use-proxy-protocol"]
+...
+```
+
+[0]: http://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
+[1]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer
\ No newline at end of file
diff --git a/site/content/docs/1.29/guides/resource-limits.md b/site/content/docs/1.29/guides/resource-limits.md
new file mode 100644
index 00000000000..a531221a88f
--- /dev/null
+++ b/site/content/docs/1.29/guides/resource-limits.md
@@ -0,0 +1,161 @@
+---
+title: Contour / Envoy Resource Limits
+---
+
+## Performance Testing Contour / Envoy
+
+- Cluster Specs
+ - Kubernetes
+ - Version: v1.12.6
+ - Nodes:
+ - 5 Worker Nodes
+ - 2 CPUs Per Node
+ - 8 GB RAM Per Node
+ - 10 GB Network
+ - Contour
+ - Single Instance
+ - 4 Instances of Envoy running in a Daemonset
+ - Each instance of Envoy is running with HostNetwork
+ - Cluster Network Bandwidth
+
+Having a good understanding of the available bandwidth is key when it comes to analyzing performance. It will give you a sense of how many requests per second you can expect to push through the network you are working with.
+
+Use iperf3 to figure out the bandwidth available between two of the kubernetes nodes. Below is sample output from running an iperf3 server on one node and an iperf3 client on another node:
+
+```bash
+[ ID] Interval Transfer Bandwidth Retr
+[ 4] 0.00-60.00 sec 34.7 GBytes 4.96 Gbits/sec 479 sender
+[ 4] 0.00-60.00 sec 34.7 GBytes 4.96 Gbits/sec receiver
+```
+
+## Memory / CPU usage
+
+Verify the Memory & CPU usage with varying numbers of services, IngressRoute resources, and traffic load into the cluster.
+
+| #Svc | #Ing | RPS | CC | Contour Memory (MB) | Contour CPU% / Core | Envoy Memory (MB) | Envoy CPU% / Core |
+| ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- |
+| 0 | 0 | 0 | 0 | 10 | 0 | 15 | 0 |
+| 5k | 0 | 0 | 0 | 46 | 2% | 15 | 0% |
+| 10k | 0 | 0 | 0 | 77 | 3% | 205 | 2% |
+| 0 | 5k | 0 | 0 | 36 | 1% | 230 | 2% |
+| 0 | 10k | 0 | 0 | 63 | 1% | 10 | 1% |
+| 5k | 5k | 0 | 0 | 244 | 1% | 221 | 1% |
+| 10k | 10k | 0 | 0 | 2600 | 6% | 430 | 4% |
+| 0 | 0 | 30k | 600 | 8 | 1% | 17 | 3% |
+| 0 | 0 | 100k | 10k | 10 | 1% | 118 | 14% |
+| 0 | 0 | 200k | 20k | 9 | 1% | 191 | 31% |
+| 0 | 0 | 300k | 30k | 10 | 1% | 225 | 40% |
+
diff --git a/site/content/docs/1.29/img/archoverview.png b/site/content/docs/1.29/img/archoverview.png
new file mode 100644
index 00000000000..f79bbfe1b4b
Binary files /dev/null and b/site/content/docs/1.29/img/archoverview.png differ
diff --git a/site/content/docs/1.29/img/contour_deployment_in_k8s.png b/site/content/docs/1.29/img/contour_deployment_in_k8s.png
new file mode 100644
index 00000000000..add5e554a07
Binary files /dev/null and b/site/content/docs/1.29/img/contour_deployment_in_k8s.png differ
diff --git a/site/content/docs/1.29/img/shutdownmanager.png b/site/content/docs/1.29/img/shutdownmanager.png
new file mode 100644
index 00000000000..ab8b7821d3c
Binary files /dev/null and b/site/content/docs/1.29/img/shutdownmanager.png differ
diff --git a/site/content/docs/1.29/img/source/shutdownmanager.drawio b/site/content/docs/1.29/img/source/shutdownmanager.drawio
new file mode 100644
index 00000000000..99b86620c42
--- /dev/null
+++ b/site/content/docs/1.29/img/source/shutdownmanager.drawio
@@ -0,0 +1 @@
+7Vtbc9o4FP41zOw+kLF84fKYG2236ZTZZKbNo7AFdiNbVBYJ9NfvkS3fjQ3EYWlKXmIdyUdC33duEvSMa3/9geOl+4U5hPZ0zVn3jJueriNkIvgnJZtYMhqYsWDBPUcNygT33i+ihJqSrjyHhIWBgjEqvGVRaLMgILYoyDDn7KU4bM5ocdYlXpCK4N7GtCr95jnCTT6XpmUdH4m3cNXUI0t1zLD9tOBsFaj5eroxj/7ibh8nutT40MUOe8mJjNuecc0ZE/GTv74mVO5tsm3xe5Mtvem6OQnELi8MZ3iGkW3ObH04N2daX481PGO6UntxGzyzjVqt2CQ7BAtfyseVT++8OaFeAK2rJeGeTwTh0EOVeJrJrl5cT5D7Jbblqy/AHZC5wqfQQvAIcAoMr/C0TSleht4smlUDCSf2iofeM/mXhDFrpJSthJzpOmVDNFSiQBylKt3oWK/v2WoYxTNCr1LYrhllcvqARR8oFJw9pRyQ785hjRPse1RS+25lew6GnYGpQyZXGfUrQiOA8wpTbxFAwwZEoj2oQqRQeyZckHVOpCD7QBjsHwcMNNVrJjxMzEs1X3JcTYa4OZoOEiFW9rFIVWccgQdFkx0pkxhtjjMVuuRAXjIvENH81lXPuimxhnHhsgULMM3zJsNSy2OpnR6WWw1qd3CtIraoBtsqtOZw/AbQGhVk792VgM0PQPoFB+AG+dk1dEanBcVhqCbvwE1Y1km5iSqZzm6iYmkHu4k6bGvcxKgDaBff6cPPZ775yZxBXzy6/6zNSR9ZFXinLHrt65IE8X5JE/Rg25pwlzvgQSZ2qTZxxoRgPnSQwLmUqZ2UUWY/tQK5L2yEziLtyQqkyIG0UC0zo9xtJr0Cn/OcepLdcZaKd0FZuxho5riAtGpxQrEAb1fMfWtQVbqn0p620scYDIoaQrbiNlEv5RPIZj2mZhX1CMwXRFT0AJJ4kxumrH3rcvuolPIkC962rsoLycoyjsdrKL2eLIjN5yERvbJVpAgd5gNHNS5wQIUiIzwv5PPnFdBaTh53wSz53nO8PaVUHILlCcXY8TnEbne9o32xPaVMHKEKtFMOUYgtZTRZEzs6DfF9HDjw9Jc0X/Dgsi8u37VQJe5/n2jwzUXaKBDng2xdZA5gR7+DrK9d6En7UbLvwhiaieBmrfgYtzb5Vo7VzQl3HMJastrdA7pmIrOTEG6gUgw3Sy5l1xhujbVmRVuC+LGiZrJfOe5/fHiYgh5delHt6+f3wWkHh25DLqkIEqPanF42Una0L2Mt9EaMhcUdytjxRYuqjhJPs2QbyTzbVmaUam7Lahlf1l8cf5Q0tbaeG1ZsTh0EazccEsPfoqDb0/62mlyrqbzSLqxxqYpCB3py02pRtMUquiJNNf+8g10JSAgM0aaczUhS1Mx4Vs9MXIKpcH+9cxY1OuU90wjIetDQ0gtg9y2jIy89LOhFY3QgG80WRa15RfJpLbP+k769G6zJu7eV7DyAbFIS/XL66Vy7/wa1O0JFAzpq7V7PNr2GbX9qAd9sj7/vQXm1mnkg3PcCLIiMkszpKg6mNXJa8z7uhO67zbPQsFToHppnGeXT6CPnWWhQ4VB2GhTXxtpHxp7gH3j4JSBHXksqJm9xSjUqdCsejGRr7YmUbfAcn8ZAats13w7nTy57QokneW22pJcKPkM/LMkpn8JUFLWS8wjZUPUCI6kKJ9ijEd2w4xUT/k54d0InM7C4iSf3rfnYsJWMSTI77IaHWjHZNvemT+IkBy2KjnsaWMvDxOYKl80QN7U0kp6D6FsF0YrDK2dKux9WtCh64yCq15V25xOIN6ZPK+o7nwGPjkYfM+gPfiB0F5DBT/poDb0f80/9pgu5Qgr2HiiV5nYX40J2ZxpD1c6u2mRjk2vsfNHWeLMx/n+8mzHsyLtVFB03jNZSeL/Thj+TraXUL7t1lnlS4d5ZaT+xe+fXuutBhcYXhm4cZhOVi+YObWLvi70t9ll78daVwdV9BXY
Spl+pLtmexPZOGlLxeLdydFY2TN9znMg0uDzdxdmxb/W8sEq+RkdRPnRLf4GjJunlf8VSf5lsmGOl7ZXM7MvvUqAChv3yMcwhrhOa2e9v4uHZj5yM2/8A
\ No newline at end of file
diff --git a/site/content/docs/1.29/redeploy-envoy.md b/site/content/docs/1.29/redeploy-envoy.md
new file mode 100644
index 00000000000..2456b53d2bf
--- /dev/null
+++ b/site/content/docs/1.29/redeploy-envoy.md
@@ -0,0 +1,72 @@
+# Redeploying Envoy
+
+The Envoy process, the data path component of Contour, at times needs to be re-deployed.
+This could be due to an upgrade, a change in configuration, or a node-failure forcing a redeployment.
+
+When implementing this roll out, the following steps should be taken:
+
+1. Stop Envoy from accepting new connections
+2. Start draining existing connections in Envoy by sending a `POST` request to `/healthcheck/fail` endpoint
+3. Wait for connections to drain before allowing Kubernetes to `SIGTERM` the pod
+
+## Overview
+
+Contour implements an `envoy` sub-command named `shutdown-manager` whose job is to manage a single Envoy instance's lifecycle for Kubernetes.
+The `shutdown-manager` runs as a new container alongside the Envoy container in the same pod.
+It uses a Kubernetes `preStop` event hook to keep the Envoy container running while waiting for connections to drain. The `/shutdown` endpoint blocks until the connections are drained.
+
+```yaml
+ - name: shutdown-manager
+ command:
+ - /bin/contour
+ args:
+ - envoy
+ - shutdown-manager
+ image: ghcr.io/projectcontour/contour:main
+ imagePullPolicy: Always
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /bin/contour
+ - envoy
+ - shutdown
+```
+
+The Envoy container also has some configuration to implement the shutdown manager.
+First the `preStop` hook is configured to use the `/shutdown` endpoint which blocks the Envoy container from exiting.
+Finally, the pod's `terminationGracePeriodSeconds` is customized to extend the time in which Kubernetes will allow the pod to be in the `Terminating` state.
+The termination grace period defines an upper bound for long-lived sessions.
+If during shutdown, the connections aren't drained to the configured amount, the `terminationGracePeriodSeconds` will send a `SIGTERM` to the pod killing it.
+
+![shutdown-manager overview][1]
+
+### Shutdown Manager Config Options
+
+The `shutdown-manager` runs as another container in the Envoy pod.
+When the pod is requested to terminate, the `preStop` hook on the `shutdown-manager` executes the `contour envoy shutdown` command initiating the shutdown sequence.
+
+The shutdown manager has a single argument that can be passed to change how it behaves:
+
+| Name | Type | Default | Description |
+|------------|------|---------|-------------|
+| serve-port | integer | 8090 | Port to serve the http server on |
+| ready-file | string | /admin/ok | File to poll while waiting for shutdown to complete. |
+
+### Shutdown Config Options
+
+The `shutdown` command does the work of draining connections from Envoy and polling for open connections.
+
+The shutdown command has a few arguments that can be passed to change how it behaves:
+
+| Name | Type | Default | Description |
+|------------|------|---------|-------------|
+| check-interval | duration | 5s | Time interval to poll Envoy for open connections. |
+| check-delay | duration | 0s | Time to wait before polling Envoy for open connections. |
+| drain-delay | duration | 0s | Time to wait before draining Envoy connections. |
+| min-open-connections | integer | 0 | Min number of open connections when polling Envoy. |
+| admin-port (Deprecated) | integer | 9001 | Deprecated: No longer used, Envoy admin interface runs as a unix socket. |
+| admin-address | string | /admin/admin.sock | Path to Envoy admin unix domain socket. |
+| ready-file | string | /admin/ok | File to write when shutdown is completed. |
+
+ [1]: ../img/shutdownmanager.png
diff --git a/site/content/docs/1.29/start-contributing.md b/site/content/docs/1.29/start-contributing.md
new file mode 100644
index 00000000000..2ddefb6c485
--- /dev/null
+++ b/site/content/docs/1.29/start-contributing.md
@@ -0,0 +1,130 @@
+# Getting Started with Contributing
+
+Thanks for your interest in contributing to Contour. Community contributions are always needed, welcome, and appreciated. This guide shows how you can contribute to Contour in the following areas:
+
+- Code
+- Website
+- Documentation
+
+Please familiarize yourself with the [Code of Conduct][1] and project [Philosophy][15] before contributing.
+
+# Getting Started with Code
+
+Everything is managed on the [Project Contour GitHub][2] organization. Create an issue for a new idea or look for issues labeled **good first issue** to get started.
+
+## How we work
+
+See [How We Work][3] for an overview:
+- Issue management
+- Code reviews
+- Coding practice
+- GitHub labels
+
+## Contribution workflow
+
+Review the [Contribution workflow][4] to understand how to work with the code.
+
+Below is a list of workflow areas:
+- Building from source
+- Contribution workflow
+- Contour testing
+- Developer Certificate of Origin (DCO) sign off
+
+# Getting Started with the Website
+
+Updates, corrections, or improvements are managed through [GitHub][16] issues.
+
+When you are ready to take on an issue, see [Website Contribution Guidelines][5] to understand how the Contour website contributions are managed. There is information on:
+- Site structure
+- Link formatting
+- Testing
+- Setting up your environment
+
+# Getting Started with Documentation
+
+Documentation is critical to the success of any project. Contour needs help to create and update its documentation, and contributors of all experience levels are welcome. Join the [Contour Community Meetings][8] meeting and learn more about the Tech Docs Working Group.
+
+Review the [Contour Technical Documentation Contributing Guide][6] for instructions to set up your environment.
+
+Technical documentation will follow the [Website Contribution Guidelines][5].
+
+## New documentation suggestions
+
+If you have a document suggestion, create an issue in [GitHub][16]. The team will triage and prioritize the issue. Connect on Slack or in a meeting to discuss your issue or request.
+
+## Helping with identified document issues
+
+Take a look at the project issues list with the label **area/documentation**. If you are new to technical writing, also filter by the **good first issue** label:
+[area/documentation and good first issue][7]
+
+Reach out on Slack or a Contour meeting for any assistance. Help is always appreciated.
+
+# Filing and Working on Issues
+
+Whether code, website, or documentation, Contour uses GitHub to create, track, and manage all issues.
+
+If there is a fix or a suggestion for improvement, create an issue in [GitHub][16].
+
+All issues are reviewed and evaluated by the Contour team.
+
+# Meet the Community and the Team
+
+To find out more about contributing to Contour, connect with us at a Contour Community Meeting, on Slack, or through the mailing list. We also have an Office Hours meeting to answer “How do I…” questions.
+
+## Contour Community meetings
+
+Discuss issues, features, or suggestions with the Contour team and other community members. Ask anything and find out more about Contour.
+
+Ask questions:
+- “How do I do this in Contour?”
+- “Why does Contour do this thing this way?”
+- “Where can I find…?”
+
+See the [Community][8] page for:
+- Meeting schedule
+- Meeting notes with zoom link
+- Meeting recordings
+
+## Mailing list
+
+To get email updates to Contour, join the [mailing list][10]. Topics include:
+- Release notifications
+- Issues
+- Feedback and suggestions
+- Meeting notifications
+
+## Find us
+There are many ways to connect with the Contour team:
+
+- Slack: Kubernetes [#contour][11]
+- Contour YouTube Channel: [CNCF Contour][12]
+- Twitter: [@projectcontour][13]
+- GitHub: [projectcontour][14]
+
+# Want More Contributing Information?
+
+Slack or a meeting is a great way to introduce yourself. Let us know what you are interested in, your background, and what you want to accomplish.
+
+# Next steps
+
+Come out and join a [Community meeting][8] or an [Office Hours meeting][9]. Ask questions about how to get started or just sit back and get to know the team.
+
+
+
+[1]: {{< param github_url >}}/blob/main/CODE_OF_CONDUCT.md
+[2]: https://github.com/projectcontour
+[3]: {{< relref "resources/how-we-work.md" >}}
+[4]: {{< param github_url >}}/blob/main/CONTRIBUTING.md
+[5]: {{< param github_url >}}/blob/main/SITE_CONTRIBUTION.md
+[6]: {{< relref "resources/contributing-docs.md" >}}
+[7]: {{< param github_url >}}/issues/?q=is%3Aopen+is%3Aissue+label%3Aarea%2Fdocumentation+label%3A%22good+first+issue%22
+[8]: {{< relref "community.md" >}}
+[9]: https://github.com/projectcontour/community/wiki/Office-Hours
+[10]: https://lists.cncf.io/g/cncf-contour-users/
+[11]: {{< param slack_url >}}
+[12]: https://www.youtube.com/channel/UCCde7QSfcyYJ8AuXofD5bTA
+[13]: https://twitter.com/projectcontour
+[14]: https://github.com/projectcontour
+[15]: {{< relref "resources/philosophy.md" >}}
+[16]: {{< param github_url >}}/issues/
+[17]: {{< param github_url >}}/
\ No newline at end of file
diff --git a/site/content/docs/1.29/troubleshooting.md b/site/content/docs/1.29/troubleshooting.md
new file mode 100644
index 00000000000..28461bd8641
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting.md
@@ -0,0 +1,41 @@
+## Troubleshooting
+
+If you encounter issues, follow the guides below for help. For topics not covered here, you can [file an issue][0], or talk to us on the [#contour channel][1] on Kubernetes Slack.
+
+### [Troubleshooting Common Proxy Errors][2]
+A guide on how to investigate common errors with Contour and Envoy.
+
+### [Envoy Administration Access][3]
+Review the linked steps to learn how to access the administration interface for your Envoy instance.
+
+### [Contour Debug Logging][4]
+Learn how to enable debug logging to diagnose issues between Contour and the Kubernetes API.
+
+### [Envoy Debug Logging][5]
+Learn how to enable debug logging to diagnose TLS connection issues.
+
+### [Visualize the Contour Graph][6]
+Learn how to visualize Contour's internal object graph in [DOT][9] format, or as a png file.
+
+### [Show Contour xDS Resources][7]
+Review the linked steps to view the [xDS][10] resource data exchanged by Contour and Envoy.
+
+### [Profiling Contour][8]
+Learn how to profile Contour by using [net/http/pprof][11] handlers.
+
+### [Envoy container stuck in unready/draining state][12]
+Read the linked document if you have Envoy containers stuck in an unready/draining state.
+
+[0]: {{< param github_url >}}/issues
+[1]: {{< param slack_url >}}
+[2]: /docs/{{< param version >}}/troubleshooting/common-proxy-errors/
+[3]: /docs/{{< param version >}}/troubleshooting/envoy-admin-interface/
+[4]: /docs/{{< param version >}}/troubleshooting/contour-debug-log/
+[5]: /docs/{{< param version >}}/troubleshooting/envoy-debug-log/
+[6]: /docs/{{< param version >}}/troubleshooting/contour-graph/
+[7]: /docs/{{< param version >}}/troubleshooting/contour-xds-resources/
+[8]: /docs/{{< param version >}}/troubleshooting/profiling-contour/
+[9]: https://en.wikipedia.org/wiki/DOT_(graph_description_language)
+[10]: https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol
+[11]: https://golang.org/pkg/net/http/pprof/
+[12]: /docs/{{< param version >}}/troubleshooting/envoy-container-draining/
diff --git a/site/content/docs/1.29/troubleshooting/common-proxy-errors.md b/site/content/docs/1.29/troubleshooting/common-proxy-errors.md
new file mode 100644
index 00000000000..e05f153242d
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/common-proxy-errors.md
@@ -0,0 +1,96 @@
+# Troubleshooting Common Proxy Errors
+
+## Unexpected HTTP errors
+
+Here are some steps to take in investigating common HTTP errors that users may encounter.
+We'll include example error cases to debug with these steps.
+
+1. Inspect the HTTP response in detail (possibly via `curl -v`).
+
+ Here we're looking to validate if the error response is coming from the backend app, Envoy, or possibly another proxy in front of Envoy.
+ If the response has the `server: envoy` header set, the request at least made it to the Envoy proxy so we can likely rule out anything before it.
+ The error may originate from Envoy itself or the backend app.
+ Look for headers or a response body that may originate from the backend app to verify if the error is in fact just the intended app behavior.
+ In the example below, we can see the response looks like it originates from Envoy, based on the `server: envoy` header and response body string.
+
+ ```
+ curl -vvv example.projectcontour.io
+ ...
+ > GET / HTTP/1.1
+ > Host: example.projectcontour.io
+ ...
+ >
+ < HTTP/1.1 503 Service Unavailable
+ < content-length: 91
+ < content-type: text/plain
+ < vary: Accept-Encoding
+ < date: Tue, 06 Feb 2024 03:44:30 GMT
+ < server: envoy
+ <
+ * Connection #0 to host example.projectcontour.io left intact
+ upstream connect error or disconnect/reset before headers. reset reason: connection failure
+ ```
+
+1. Look at the Envoy pod logs for the access logs corresponding to the erroring request/response.
+
+ The exact fields/field ordering present in the access log may vary if you have [configured a custom access log string or JSON access logs][0].
+ For example for a Contour installation using the [default Envoy access log format][1] we would want to inspect:
+ * `%REQ(:METHOD)%`, `%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%`, `%REQ(:AUTHORITY)%`, `%PROTOCOL%`: Ensure these are sensible values based on your configured route and HTTP request
+ * `%RESPONSE_FLAGS%`: See the [documentation on Envoy response flags][2] and below how to interpret a few of them in a Contour context:
+ * `UF`: Likely that Envoy could not connect to the upstream
+ * `UH`: Upstream Service has no health/ready pods
+ * `NR`: No configured route matching the request
+ * `%DURATION%`: Can correlate this with any configured timeouts
+ * `%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)%`: Can correlate this with any configured timeouts. If `-` then this is a hint the request was never forwarded to the upstream.
+ * `%UPSTREAM_HOST%`: This is the IP of the upstream pod that was selected to proxy the request to and can be used to verify the exact upstream instance that might be erroring.
+
+ For example in this access log:
+
+ ```
+ [2024-02-06T15:18:17.437Z] "GET / HTTP/1.1" 503 UF 0 91 1998 - "103.67.2.26" "curl/8.4.0" "d70640ec-2feb-46f8-9f63-24c44142c42e" "example.projectcontour.io" "10.244.8.27:8080"
+ ```
+
+ We can see the `UF` response flag as the cause of the `503` response code.
+ We also see the `-` for upstream request time.
+ It is likely in this case that Envoy was not able to establish a connection to the upstream.
+ That is further supported by the request duration of `1998` which is approximately the default upstream connection timeout of `2s`.
+
+1. Inspect Envoy metrics
+
+ This method of debugging can be useful especially for deployments that service a large volume of traffic.
+ In this case, access logs are possibly not suitable to use, as the volume of logs may be too large to pinpoint an exact erroring request.
+
+ Metrics from individual Envoy instances can be viewed manually or scraped using Envoy's prometheus endpoints and graphed using common visualization tools.
+ See the `/stats/prometheus` endpoint of the [Envoy admin interface][3].
+
+ Metrics that may be useful to inspect:
+ * [Listener metrics][4]
+ * `downstream_cx_total`
+ * `ssl.connection_error`
+ * [HTTP metrics][5]
+ * `downstream_cx_total`
+ * `downstream_cx_protocol_error`
+ * `downstream_rq_total`
+ * `downstream_rq_rx_reset`
+ * `downstream_rq_tx_reset`
+ * `downstream_rq_timeout`
+ * `downstream_rq_5xx` (and other status code groups)
+ * [Upstream metrics][6]
+ * `upstream_cx_total`
+ * `upstream_cx_connect_fail`
+ * `upstream_cx_connect_timeout`
+ * `upstream_rq_total`
+ * `upstream_rq_timeout`
+
+1. Send a direct request to the backend app to narrow down where the error may be originating.
+
+ This can be done via a port-forward to send a request to the app directly, skipping over the Envoy proxy.
+ If this sort of request succeeds, we know the issue likely originates from Contour configuration or the Envoy proxy rather than the app itself.
+
+[0]: /docs/{{< param latest_version >}}/config/access-logging/
+[1]: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#default-format-string
+[2]: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage#config-access-log-format-response-flags
+[3]: /docs/{{< param latest_version >}}/guides/prometheus/#envoy-metrics
+[4]: https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/stats
+[5]: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/stats
+[6]: https://www.envoyproxy.io/docs/envoy/latest/configuration/upstream/cluster_manager/cluster_stats
diff --git a/site/content/docs/1.29/troubleshooting/contour-debug-log.md b/site/content/docs/1.29/troubleshooting/contour-debug-log.md
new file mode 100644
index 00000000000..821634242c6
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/contour-debug-log.md
@@ -0,0 +1,6 @@
+# Enabling Contour Debug Logging
+
+The `contour serve` subcommand has two command-line flags that can be helpful for debugging.
+The `--debug` flag enables general Contour debug logging, which logs more information about how Contour is processing API resources.
+The `--kubernetes-debug` flag enables verbose logging in the Kubernetes client API, which can help debug interactions between Contour and the Kubernetes API server.
+This flag requires an integer log level argument, where a higher number indicates more detailed logging.
diff --git a/site/content/docs/1.29/troubleshooting/contour-graph.md b/site/content/docs/1.29/troubleshooting/contour-graph.md
new file mode 100644
index 00000000000..5abcfeb22af
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/contour-graph.md
@@ -0,0 +1,25 @@
+# Visualizing Contour's Internal Object Graph
+
+Contour models its configuration using a directed acyclic graph (DAG) of internal objects.
+This can be visualized through a debug endpoint that outputs the DAG in [DOT][2] format.
+To visualize the graph, you must have [`graphviz`][3] installed on your system.
+
+To download the graph and save it as a PNG:
+
+```bash
+# Get one of the pods that matches the Contour deployment
+$ CONTOUR_POD=$(kubectl -n projectcontour get pod -l app=contour -o name | head -1)
+# Do the port forward to that pod
+$ kubectl -n projectcontour port-forward $CONTOUR_POD 6060
+# Download and store the DAG in png format
+$ curl localhost:6060/debug/dag | dot -T png > contour-dag.png
+```
+
+The following is an example of a DAG that maps `http://kuard.local:80/` to the
+`kuard` service in the `default` namespace:
+
+![Sample DAG][4]
+
+[2]: https://en.wikipedia.org/wiki/DOT
+[3]: https://graphviz.gitlab.io/
+[4]: /img/kuard-dag.png
diff --git a/site/content/docs/1.29/troubleshooting/contour-xds-resources.md b/site/content/docs/1.29/troubleshooting/contour-xds-resources.md
new file mode 100644
index 00000000000..69f413a8cb3
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/contour-xds-resources.md
@@ -0,0 +1,19 @@
+# Interrogate Contour's xDS Resources
+
+Sometimes it's helpful to be able to interrogate Contour to find out exactly what [xDS][1] resource data it is sending to Envoy.
+Contour ships with a `contour cli` subcommand which can be used for this purpose.
+
+Because Contour secures its communications with Envoy using Secrets in the cluster, the easiest way is to run `contour cli` commands _inside_ the pod.
+Do this via `kubectl exec`:
+
+```bash
+# Get one of the pods that matches the examples/daemonset
+$ CONTOUR_POD=$(kubectl -n projectcontour get pod -l app=contour -o jsonpath='{.items[0].metadata.name}')
+# Run the contour cli command inside that pod
+$ kubectl -n projectcontour exec $CONTOUR_POD -c contour -- contour cli lds --cafile=/certs/ca.crt --cert-file=/certs/tls.crt --key-file=/certs/tls.key
+```
+
+This will stream changes to the LDS API endpoint to your terminal.
+Replace `contour cli lds` with `contour cli rds` for route resources, `contour cli cds` for cluster resources, and `contour cli eds` for endpoints.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol
diff --git a/site/content/docs/1.29/troubleshooting/envoy-admin-interface.md b/site/content/docs/1.29/troubleshooting/envoy-admin-interface.md
new file mode 100644
index 00000000000..c44b6fe3e9d
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/envoy-admin-interface.md
@@ -0,0 +1,32 @@
+# Accessing the Envoy Administration Interface
+
+Getting access to the Envoy [administration interface][1] can be useful for diagnosing issues with routing or cluster health.
+However, Contour doesn't expose the entire Envoy Administration interface since that interface contains many options, such as shutting down Envoy or draining traffic.
+To prohibit this behavior, Contour only exposes the read-only options from the admin interface which still allows for debugging Envoy, but without the options mentioned previously.
+
+Those endpoints are:
+ - /certs
+ - /clusters
+ - /listeners
+ - /config_dump
+ - /memory
+ - /ready
+ - /runtime
+ - /server_info
+ - /stats
+ - /stats/prometheus
+ - /stats/recentlookups
+
+The Envoy administration interface is bound by default to `http://127.0.0.1:9001`.
+To access it from your workstation use `kubectl port-forward` like so:
+
+```sh
+# Get one of the pods that matches the Envoy daemonset
+ENVOY_POD=$(kubectl -n projectcontour get pod -l app=envoy -o name | head -1)
+# Do the port forward to that pod
+kubectl -n projectcontour port-forward $ENVOY_POD 9001
+```
+
+Then navigate to `http://127.0.0.1:9001/` to access the administration interface for the Envoy container running on that pod.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/operations/admin
diff --git a/site/content/docs/1.29/troubleshooting/envoy-container-draining.md b/site/content/docs/1.29/troubleshooting/envoy-container-draining.md
new file mode 100644
index 00000000000..82bb47cb883
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/envoy-container-draining.md
@@ -0,0 +1,29 @@
+# Envoy container stuck in unready/draining state
+
+It's possible for the Envoy containers to become stuck in an unready/draining state.
+This is an unintended side effect of the shutdown-manager sidecar container being restarted by the kubelet.
+For more details on exactly how this happens, see [this issue][1].
+
+If you observe Envoy containers in this state, you should `kubectl delete` them to allow new Pods to be created to replace them.
+
+To make this issue less likely to occur, you should:
+- ensure you have [resource requests][2] on all your containers
+- ensure you do **not** have a liveness probe on the shutdown-manager sidecar container in the envoy daemonset (this was removed from the example YAML in Contour 1.24.0).
+
+If the above are not sufficient for preventing the issue, you may also add a liveness probe to the envoy container itself, like the following:
+
+```yaml
+livenessProbe:
+ httpGet:
+ path: /ready
+ port: 8002
+ initialDelaySeconds: 15
+ periodSeconds: 5
+ failureThreshold: 6
+```
+
+This will cause the kubelet to restart the envoy container if it does get stuck in this state, resulting in a return to normal operations load balancing traffic.
+Note that in this case, it's possible that a graceful drain of connections may or may not occur, depending on the exact sequence of operations that preceded the envoy container failing the liveness probe.
+
+[1]: https://github.com/projectcontour/contour/issues/4851
+[2]: /docs/{{< param latest_version >}}/deploy-options/#setting-resource-requests-and-limits
\ No newline at end of file
diff --git a/site/content/docs/1.29/troubleshooting/envoy-debug-log.md b/site/content/docs/1.29/troubleshooting/envoy-debug-log.md
new file mode 100644
index 00000000000..bfef4fa5531
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/envoy-debug-log.md
@@ -0,0 +1,8 @@
+# Enabling Envoy Debug Logging
+
+The `envoy` command has a `--log-level` [flag][1] that can be useful for debugging.
+By default, it's set to `info`.
+To change it to `debug`, edit the `envoy` DaemonSet in the `projectcontour` namespace and replace the `--log-level info` flag with `--log-level debug`.
+Setting the Envoy log level to `debug` can be particularly useful for debugging TLS connection failures.
+
+[1]: https://www.envoyproxy.io/docs/envoy/latest/operations/cli
diff --git a/site/content/docs/1.29/troubleshooting/profiling-contour.md b/site/content/docs/1.29/troubleshooting/profiling-contour.md
new file mode 100644
index 00000000000..95bb0164210
--- /dev/null
+++ b/site/content/docs/1.29/troubleshooting/profiling-contour.md
@@ -0,0 +1,14 @@
+# Accessing Contour's /debug/pprof Service
+
+Contour exposes the [net/http/pprof][1] handlers for `go tool pprof` and `go tool trace` by default on `127.0.0.1:6060`.
+This service is useful for profiling Contour.
+To access it from your workstation use `kubectl port-forward` like so,
+
+```bash
+# Get one of the pods that matches the Contour deployment
+$ CONTOUR_POD=$(kubectl -n projectcontour get pod -l app=contour -o name | head -1)
+# Do the port forward to that pod
+$ kubectl -n projectcontour port-forward $CONTOUR_POD 6060
+```
+
+[1]: https://golang.org/pkg/net/http/pprof
diff --git a/site/content/resources/compatibility-matrix.md b/site/content/resources/compatibility-matrix.md
index a4326642408..f2d57fd78d1 100644
--- a/site/content/resources/compatibility-matrix.md
+++ b/site/content/resources/compatibility-matrix.md
@@ -11,6 +11,7 @@ These combinations of versions are specifically tested in CI and supported by th
| Contour Version | Envoy Version | Kubernetes Versions | Gateway API Version |
| --------------- | :------------------- | ------------------- | --------------------|
| main | [1.30.1][53] | 1.29, 1.28, 1.27 | [1.0.0][110] |
+| 1.29.0 | [1.30.1][53] | 1.29, 1.28, 1.27 | [1.0.0][110] |
| 1.28.4 | [1.29.4][55] | 1.29, 1.28, 1.27 | [1.0.0][110] |
| 1.28.3 | [1.29.3][50] | 1.29, 1.28, 1.27 | [1.0.0][110] |
| 1.28.2 | [1.29.2][49] | 1.29, 1.28, 1.27 | [1.0.0][110] |
diff --git a/site/data/docs/1-29-toc.yml b/site/data/docs/1-29-toc.yml
new file mode 100644
index 00000000000..151db52d41a
--- /dev/null
+++ b/site/data/docs/1-29-toc.yml
@@ -0,0 +1,151 @@
+toc:
+ - title: Introduction
+ subfolderitems:
+ - page: Contour Architecture
+ url: /architecture
+ - page: Contour Philosophy
+ link: /resources/philosophy
+ - title: Configuration
+ subfolderitems:
+ - page: HTTPProxy Fundamentals
+ url: /config/fundamentals
+ - page: Gateway API Support
+ url: /config/gateway-api
+ - page: Ingress v1 Support
+ url: /config/ingress
+ - page: Virtual Hosts
+ url: /config/virtual-hosts
+ - page: Inclusion and Delegation
+ url: /config/inclusion-delegation
+ - page: TLS Termination
+ url: /config/tls-termination
+ - page: Upstream TLS
+ url: /config/upstream-tls
+ - page: Request Routing
+ url: /config/request-routing
+ - page: External Service Routing
+ url: /config/external-service-routing
+ - page: Request Rewriting
+ url: /config/request-rewriting
+ - page: CORS
+ url: /config/cors
+ - page: Websockets
+ url: /config/websockets
+ - page: Upstream Health Checks
+ url: /config/health-checks
+ - page: Client Authorization
+ url: /config/client-authorization
+ - page: TLS Delegation
+ url: /config/tls-delegation
+ - page: Rate Limiting
+ url: /config/rate-limiting
+ - page: Access logging
+ url: /config/access-logging
+ - page: Cookie Rewriting
+ url: /config/cookie-rewriting
+ - page: Overload Manager
+ url: /config/overload-manager
+ - page: JWT Verification
+ url: /config/jwt-verification
+ - page: IP Filtering
+ url: /config/ip-filtering
+ - page: Annotations Reference
+ url: /config/annotations
+ - page: Slow Start Mode
+ url: /config/slow-start
+ - page: Tracing Support
+ url: /config/tracing
+ - page: API Reference
+ url: /config/api
+ - title: Deployment
+ subfolderitems:
+ - page: Deployment Options
+ url: /deploy-options
+ - page: Contour Configuration
+ url: /configuration
+ - page: Upgrading Contour
+ link: /resources/upgrading
+ - page: Enabling TLS between Envoy and Contour
+ url: /grpc-tls-howto
+ - page: Redeploy Envoy
+ url: /redeploy-envoy
+ - title: Guides
+ subfolderitems:
+ - page: Deploying Contour on AWS with NLB
+ url: /guides/deploy-aws-nlb/
+ - page: AWS Network Load Balancer TLS Termination with Contour
+ url: /guides/deploy-aws-tls-nlb/
+ - page: Deploying HTTPS services with Contour and cert-manager
+ url: /guides/cert-manager/
+ - page: External Authorization Support
+ url: /guides/external-authorization/
+ - page: FIPS 140-2 in Contour
+ url: /guides/fips
+ - page: Using Gatekeeper with Contour
+ url: /guides/gatekeeper
+ - page: Using Gateway API with Contour
+ url: /guides/gateway-api
+ - page: Global Rate Limiting
+ url: /guides/global-rate-limiting
+ - page: Configuring ingress to gRPC services with Contour
+ url: /guides/grpc
+ - page: Health Checking
+ url: /guides/health-checking
+ - page: Creating a Contour-compatible kind cluster
+ url: /guides/kind
+ - page: Collecting Metrics with Prometheus
+ url: /guides/prometheus/
+ - page: How to Configure PROXY Protocol v1/v2 Support
+ url: /guides/proxy-proto/
+ - page: Contour/Envoy Resource Limits
+ url: /guides/resource-limits/
+ - title: Troubleshooting
+ subfolderitems:
+ - page: Troubleshooting Common Proxy Errors
+ url: /troubleshooting/common-proxy-errors
+ - page: Envoy Administration Access
+ url: /troubleshooting/envoy-admin-interface
+ - page: Contour Debug Logging
+ url: /troubleshooting/contour-debug-log
+ - page: Envoy Debug Logging
+ url: /troubleshooting/envoy-debug-log
+ - page: Visualize the Contour Graph
+ url: /troubleshooting/contour-graph
+ - page: Show Contour xDS Resources
+ url: /troubleshooting/contour-xds-resources
+ - page: Profiling Contour
+ url: /troubleshooting/profiling-contour
+ - page: Envoy Container Stuck in Unready State
+ url: /troubleshooting/envoy-container-draining
+ - title: Resources
+ subfolderitems:
+ - page: Support Policy
+ link: /resources/support
+ - page: Compatibility Matrix
+ link: /resources/compatibility-matrix
+ - page: Contour Deprecation Policy
+ link: /resources/deprecation-policy
+ - page: Release Process
+ link: /resources/release-process
+ - page: Frequently Asked Questions
+ link: /resources/faq
+ - page: Tagging
+ link: /resources/tagging
+ - page: Adopters
+ link: /resources/adopters
+ - page: Ecosystem
+ link: /resources/ecosystem
+ - title: Security
+ subfolderitems:
+ - page: Threat Model and Security Posture
+ link: /resources/security-threat-model
+ - page: Security Report Process
+ link: /resources/security-process
+ - page: Security Fix Checklist
+ link: /resources/security-checklist
+ - title: Contribute
+ subfolderitems:
+ - page: Start Contributing
+ url: /start-contributing
+ - page: How We Work
+ link: /resources/how-we-work
diff --git a/site/data/docs/toc-mapping.yml b/site/data/docs/toc-mapping.yml
index 4efb5bf2424..d1ccf6597f8 100644
--- a/site/data/docs/toc-mapping.yml
+++ b/site/data/docs/toc-mapping.yml
@@ -51,3 +51,4 @@ v1.19.1: v1-19-1-toc
"1.26": 1-26-toc
"1.27": 1-27-toc
"1.28": 1-28-toc
+"1.29": 1-29-toc
diff --git a/versions.yaml b/versions.yaml
index 47f76dd4d36..51bdd69e37d 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -14,6 +14,16 @@ versions:
- "1.27"
gateway-api:
- "1.0.0"
+ - version: v1.29.0
+ supported: "true"
+ dependencies:
+ envoy: "1.30.1"
+ kubernetes:
+ - "1.29"
+ - "1.28"
+ - "1.27"
+ gateway-api:
+ - "1.0.0"
- version: v1.28.4
supported: "true"
dependencies:
@@ -105,7 +115,7 @@ versions:
gateway-api:
- "0.8.1"
- version: v1.26.3
- supported: "true"
+ supported: "false"
dependencies:
envoy: "1.27.4"
kubernetes: