diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4bbcbc9e31ad6..9d4c17628587d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,40 @@
# Changelog
+## 4.1.1
+
+This release of Teleport contains a bug fix.
+
+* Fixed an issue with multi-cluster EKS when the Teleport proxy runs outside EKS. [#3070](https://github.com/gravitational/teleport/pull/3070)
+
+## 4.1.0
+
+This is a major Teleport release with a focus on stability and bug fixes.
+
+### Improvements
+
+* Support for IPv6. [#2124](https://github.com/gravitational/teleport/issues/2124)
+* Kubernetes support does not require SNI. [#2766](https://github.com/gravitational/teleport/issues/2766)
+* Support use of a path for `auth_token` in `teleport.yaml`. [#2515](https://github.com/gravitational/teleport/issues/2515)
+* Implement ProxyJump compatibility. [#2543](https://github.com/gravitational/teleport/issues/2543)
+* Audit logs should show roles. [#2823](https://github.com/gravitational/teleport/issues/2823)
+* Allow tsh to go background and without executing remote command. [#2297](https://github.com/gravitational/teleport/issues/2297)
+* Provide a high level tool to backup and restore the cluster state. [#2480](https://github.com/gravitational/teleport/issues/2480)
+* Investigate nodes using stale list when connecting to proxies (discovery protocol). [#2832](https://github.com/gravitational/teleport/issues/2832)
+
+### Fixes
+
+* Proxy can hang due to invalid OIDC connector. [#2690](https://github.com/gravitational/teleport/issues/2690)
+* Proper `-D` flag parsing. [#2663](https://github.com/gravitational/teleport/issues/2663)
+* tsh status does not show correct cluster name. [#2671](https://github.com/gravitational/teleport/issues/2671)
+* Teleport truncates MOTD with PAM. [#2477](https://github.com/gravitational/teleport/issues/2477)
+* Miscellaneous fixes around error handling and reporting.
+
+## 4.0.10
+
+This release of Teleport contains a bug fix.
+
+* Fixed a goroutine leak that occurred whenever a leaf cluster disconnected from the root cluster. [#3037](https://github.com/gravitational/teleport/pull/3037)
+
## 4.0.9
This release of Teleport contains a bug fix.
diff --git a/Makefile b/Makefile
index ecbfe9e99cad3..69934ca0579b8 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,9 @@
# Naming convention:
# for stable releases we use "1.0.0" format
# for pre-releases, we use "1.0.0-beta.2" format
-VERSION=4.2.0-alpha.1
+VERSION=4.2.0-alpha.2
+
+DOCKER_IMAGE ?= quay.io/gravitational/teleport
# These are standard autotools variables, don't change them please
BUILDDIR ?= build
@@ -318,12 +320,12 @@ install: build
.PHONY: image
image:
cp ./build.assets/charts/Dockerfile $(BUILDDIR)/
- cd $(BUILDDIR) && docker build --no-cache . -t quay.io/gravitational/teleport:$(VERSION)
+ cd $(BUILDDIR) && docker build --no-cache . -t $(DOCKER_IMAGE):$(VERSION)
if [ -f e/Makefile ]; then $(MAKE) -C e image; fi
.PHONY: publish
publish:
- docker push quay.io/gravitational/teleport:$(VERSION)
+ docker push $(DOCKER_IMAGE):$(VERSION)
if [ -f e/Makefile ]; then $(MAKE) -C e publish; fi
.PHONY: print-version
diff --git a/assets/marketplace/Jenkinsfile-build-ent b/assets/marketplace/Jenkinsfile-build-ent
index 68bc592bfec1f..50d2c0f39f6fc 100644
--- a/assets/marketplace/Jenkinsfile-build-ent
+++ b/assets/marketplace/Jenkinsfile-build-ent
@@ -7,7 +7,7 @@ pipeline {
timestamps()
}
parameters {
- string(name: 'version', defaultValue: '3.1.7', description: 'Teleport version to build')
+ string(name: 'version', defaultValue: '4.1.0', description: 'Teleport version to build')
}
stages {
stage('Create files/build directory') {
@@ -20,7 +20,7 @@ pipeline {
stage('Run Packer to build specified version') {
steps {
dir('assets/marketplace') {
- sh "PUBLIC_AMI_NAME=gravitational-teleport-ami-ent-${params.version} MARKETPLACE_AMI_NAME=gravitational-teleport-marketplace-ami-ent-${params.version} TELEPORT_VERSION=${params.version} make ent-jenkins-build"
+ sh "PUBLIC_AMI_NAME=gravitational-teleport-ami-ent-${params.version} FIPS_AMI_NAME=gravitational-teleport-ami-ent-${params.version}-fips MARKETPLACE_AMI_NAME=gravitational-teleport-marketplace-ami-ent-${params.version} TELEPORT_VERSION=${params.version} make ent-jenkins-build"
}
}
}
@@ -28,6 +28,7 @@ pipeline {
steps {
dir('assets/marketplace') {
sh 'make change-amis-to-public-ent'
+ sh 'make change-amis-to-public-ent-fips'
}
}
}
diff --git a/assets/marketplace/Jenkinsfile-build-oss b/assets/marketplace/Jenkinsfile-build-oss
index 7fa8596ecf723..a0b515316238a 100644
--- a/assets/marketplace/Jenkinsfile-build-oss
+++ b/assets/marketplace/Jenkinsfile-build-oss
@@ -7,7 +7,7 @@ pipeline {
timestamps()
}
parameters {
- string(name: 'version', defaultValue: '3.1.7', description: 'Teleport version to build')
+ string(name: 'version', defaultValue: '4.1.0', description: 'Teleport version to build')
}
stages {
stage('Create files/build directory') {
diff --git a/assets/marketplace/Makefile b/assets/marketplace/Makefile
index 1e6a7348d91a1..17a40b8f81fae 100644
--- a/assets/marketplace/Makefile
+++ b/assets/marketplace/Makefile
@@ -14,7 +14,7 @@ MARKETPLACE_AMI_NAME ?=
AWS_REGION ?= us-west-2
# Teleport version
-TELEPORT_VERSION ?= 4.0.4
+TELEPORT_VERSION ?= 4.1.0
# Teleport UID is the UID of a non-privileged 'teleport' user
TELEPORT_UID ?= 1007
@@ -52,7 +52,7 @@ oss:
@echo "Building image $(TELEPORT_VERSION) $(TELEPORT_TYPE)"
@echo "BUILD_TIMESTAMP=$(BUILD_TIMESTAMP)"
mkdir -p files/build
- packer build -force -var build_timestamp=$(BUILD_TIMESTAMP) -except teleport-aws-linux-marketplace single-ami.json
+ packer build -force -var build_timestamp=$(BUILD_TIMESTAMP) -only teleport-aws-linux single-ami.json
@echo "$(BUILD_TIMESTAMP)" > files/build/oss_build_timestamp.txt
# Build named 'production' AMI and marketplace version
@@ -65,7 +65,7 @@ oss-jenkins-build:
@echo "Marketplace AMI name: $(MARKETPLACE_AMI_NAME)"
@echo "BUILD_TIMESTAMP=$(BUILD_TIMESTAMP)"
mkdir -p files/build
- packer build -force -var ami_name=$(PUBLIC_AMI_NAME) -var marketplace_ami_name=$(MARKETPLACE_AMI_NAME) -var build_type=production -var build_timestamp=$(BUILD_TIMESTAMP) single-ami.json
+ packer build -force -var ami_name=$(PUBLIC_AMI_NAME) -var marketplace_ami_name=$(MARKETPLACE_AMI_NAME) -var build_timestamp=$(BUILD_TIMESTAMP) -except teleport-aws-linux-fips single-ami.json
@echo "$(BUILD_TIMESTAMP)" > files/build/oss_build_timestamp.txt
.PHONY: change-amis-to-public-oss
@@ -80,7 +80,7 @@ ent: check-vars
@echo "Building image $(TELEPORT_VERSION) $(TELEPORT_TYPE)"
@echo "BUILD_TIMESTAMP=$(BUILD_TIMESTAMP)"
mkdir -p files/build
- packer build -force -var build_timestamp=$(BUILD_TIMESTAMP) -except teleport-aws-linux-marketplace single-ami.json
+ packer build -force -var build_timestamp=$(BUILD_TIMESTAMP) -only teleport-aws-linux single-ami.json
@echo "$(BUILD_TIMESTAMP)" > files/build/ent_build_timestamp.txt
# Build named 'production' AMI and marketplace version
@@ -90,10 +90,11 @@ ent-jenkins-build: check-vars
ent-jenkins-build:
@echo "Building image $(TELEPORT_VERSION) $(TELEPORT_TYPE) via Jenkins"
@echo "Public AMI name: $(PUBLIC_AMI_NAME)"
+ @echo "FIPS AMI name: $(FIPS_AMI_NAME)"
@echo "Marketplace AMI name: $(MARKETPLACE_AMI_NAME)"
@echo "BUILD_TIMESTAMP=$(BUILD_TIMESTAMP)"
mkdir -p files/build
- packer build -force -var ami_name=$(PUBLIC_AMI_NAME) -var marketplace_ami_name=$(MARKETPLACE_AMI_NAME) -var build_type=production -var build_timestamp=$(BUILD_TIMESTAMP) single-ami.json
+ packer build -force -var ami_name=$(PUBLIC_AMI_NAME) -var fips_ami_name=$(FIPS_AMI_NAME) -var marketplace_ami_name=$(MARKETPLACE_AMI_NAME) -var build_timestamp=$(BUILD_TIMESTAMP) single-ami.json
@echo "$(BUILD_TIMESTAMP)" > files/build/ent_build_timestamp.txt
.PHONY: change-amis-to-public-ent
@@ -101,6 +102,11 @@ change-amis-to-public-ent:
@echo "Making Enterprise AMIs public"
bash files/make-amis-public.sh ent
+.PHONY: change-amis-to-public-ent-fips
+change-amis-to-public-ent-fips:
+ @echo "Making FIPS Enterprise AMIs public"
+ bash files/make-amis-public.sh ent-fips
+
# Other helpers
.PHONY: check-vars
diff --git a/assets/marketplace/cloudformation/files/install.sh b/assets/marketplace/cloudformation/files/install.sh
index 0a7f07332886e..9c96263527b1c 100644
--- a/assets/marketplace/cloudformation/files/install.sh
+++ b/assets/marketplace/cloudformation/files/install.sh
@@ -26,8 +26,8 @@ useradd -r teleport -u ${TELEPORT_UID}
usermod -a -G adm teleport
# Setup teleport run dir for pid files
-mkdir -p /var/run/teleport/ /var/lib/teleport /etc/teleport.d
-chown -R teleport:adm /var/run/teleport /var/lib/teleport /etc/teleport.d/
+mkdir -p /run/teleport/ /var/lib/teleport /etc/teleport.d
+chown -R teleport:adm /run/teleport /var/lib/teleport /etc/teleport.d/
# Download and install teleport binaries
pushd /tmp
diff --git a/assets/marketplace/cloudformation/files/system/teleport-auth.service b/assets/marketplace/cloudformation/files/system/teleport-auth.service
index 393dde14c1ad8..baf2542974efe 100644
--- a/assets/marketplace/cloudformation/files/system/teleport-auth.service
+++ b/assets/marketplace/cloudformation/files/system/teleport-auth.service
@@ -12,7 +12,7 @@ RestartSec=5
RuntimeDirectory=teleport
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/cloudformation/files/system/teleport-node.service b/assets/marketplace/cloudformation/files/system/teleport-node.service
index 5e2d6871e355b..ae6ac91e393b3 100644
--- a/assets/marketplace/cloudformation/files/system/teleport-node.service
+++ b/assets/marketplace/cloudformation/files/system/teleport-node.service
@@ -13,7 +13,7 @@ RuntimeDirectory=teleport
ExecStartPre=/usr/bin/teleport-ssm-get-token
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/cloudformation/files/system/teleport-proxy.service b/assets/marketplace/cloudformation/files/system/teleport-proxy.service
index 50df1a5809269..d92344bfb2d92 100644
--- a/assets/marketplace/cloudformation/files/system/teleport-proxy.service
+++ b/assets/marketplace/cloudformation/files/system/teleport-proxy.service
@@ -15,7 +15,7 @@ ExecStartPre=/usr/bin/teleport-ssm-get-token
ExecStartPre=/bin/aws s3 sync s3://${TELEPORT_S3_BUCKET}/live/${TELEPORT_DOMAIN_NAME} /var/lib/teleport
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/files/bin/teleport-generate-config b/assets/marketplace/files/bin/teleport-generate-config
index f525346d0c650..0085b3adaae7c 100755
--- a/assets/marketplace/files/bin/teleport-generate-config
+++ b/assets/marketplace/files/bin/teleport-generate-config
@@ -9,12 +9,12 @@ if [ -f /etc/teleport.yaml ]; then
cp /etc/teleport.yaml /etc/teleport.yaml.old
fi
-# Setup teleport config file
+# Setup Teleport config file
LOCAL_IP=$(curl -sS http://169.254.169.254/latest/meta-data/local-ipv4)
LOCAL_HOSTNAME=$(curl -sS http://169.254.169.254/latest/meta-data/local-hostname)
LOCAL_HOSTNAME=${LOCAL_HOSTNAME//./-}
-# Source variables set up by cloudformation template
+# Source variables from user-data
source /etc/teleport.d/conf
# Set host UUID so auth server picks it up, as each auth server's
@@ -33,6 +33,18 @@ if [[ "${USE_ACM}" != "true" ]]; then
echo "use-letsencrypt" > /etc/teleport.d/use-letsencrypt
fi
+# Determine whether this is a FIPS AMI or not
+# We do this by looking at the ExecStart command for teleport.service to see whether it contains 'fips' or not (which is set during packer build)
+# We use this to modify the auth service's configuration depending on whether FIPS is in use or not
+# With FIPS: auth_service.authentication.local_auth must be 'false' or Teleport will not start
+# Without FIPS: auth_service.authentication.second_factor should be set to 'otp'
+FIPS_AMI=false
+AUTHENTICATION_STANZA="second_factor: otp"
+if grep "ExecStart" /etc/systemd/system/teleport.service | grep -q "fips"; then
+ FIPS_AMI=true
+ AUTHENTICATION_STANZA="local_auth: false"
+fi
+
if [[ "${TELEPORT_ROLE}" == "auth" ]]; then
echo "auth" > /etc/teleport.d/role.auth
# Teleport Auth server is using DynamoDB as a backend
@@ -63,7 +75,7 @@ auth_service:
enabled: yes
listen_addr: 0.0.0.0:3025
authentication:
- second_factor: otp
+ ${AUTHENTICATION_STANZA}
cluster_name: ${TELEPORT_CLUSTER_NAME}
EOF
@@ -239,9 +251,8 @@ teleport:
auth_service:
enabled: yes
listen_addr: 0.0.0.0:3025
-
authentication:
- second_factor: otp
+ ${AUTHENTICATION_STANZA}
ssh_service:
enabled: yes
@@ -426,5 +437,7 @@ EOF
systemctl restart telegraf.service
fi
-# make sure config file can be edited by pre-start commands running later
-chown teleport:adm /etc/teleport.yaml
\ No newline at end of file
+# make sure config file can be edited by pre-start commands running later (assuming it exists)
+if [ -f /etc/teleport.yaml ]; then
+ chown teleport:adm /etc/teleport.yaml
+fi
\ No newline at end of file
diff --git a/assets/marketplace/files/install.sh b/assets/marketplace/files/install.sh
index 4e7cf6ce7bb0a..82c81427c91c7 100644
--- a/assets/marketplace/files/install.sh
+++ b/assets/marketplace/files/install.sh
@@ -32,10 +32,11 @@ rm -f /tmp/influxdb.rpm
# Install certbot to rotate certificates
# Certbot is a tool to request letsencrypt certificates,
# remove it if you don't need letsencrypt.
-curl ${CURL_OPTS} -O https://bootstrap.pypa.io/get-pip.py
-python2.7 get-pip.py
-pip install -I awscli requests[security]==2.18.4
-pip install certbot==0.21.0 certbot-dns-route53==0.21.0
+sudo yum -y install python3 python3-pip
+#curl ${CURL_OPTS} -O https://bootstrap.pypa.io/get-pip.py
+#python3 get-pip.py
+pip3 install -I awscli requests[security]==2.18.4
+pip3 install certbot==0.21.0 certbot-dns-route53==0.21.0
# Create teleport user. It is helpful to share the same UID
# to have the same permissions on shared NFS volumes across auth servers and for consistency.
@@ -44,23 +45,34 @@ useradd -r teleport -u ${TELEPORT_UID} -d /var/lib/teleport
usermod -a -G adm teleport
# Setup teleport run dir for pid files
-mkdir -p /var/run/teleport/ /var/lib/teleport /etc/teleport.d
-chown -R teleport:adm /var/run/teleport /var/lib/teleport /etc/teleport.d/
+mkdir -p /run/teleport/ /var/lib/teleport /etc/teleport.d
+chown -R teleport:adm /run/teleport /var/lib/teleport /etc/teleport.d/
# Download and install teleport binaries
pushd /tmp
-if [[ "${TELEPORT_TYPE}" == "oss" ]]; then
- echo "Installing OSS Teleport version ${TELEPORT_VERSION}"
- curl ${CURL_OPTS} -o teleport.tar.gz https://s3.amazonaws.com/clientbuilds.gravitational.io/teleport/${TELEPORT_VERSION}/teleport-v${TELEPORT_VERSION}-linux-amd64-bin.tar.gz
- tar -xzf teleport.tar.gz
- cp teleport/tctl teleport/tsh teleport/teleport /usr/bin
- rm -rf /tmp/teleport.tar.gz /tmp/teleport
-else
- echo "Installing Enterprise Teleport version ${TELEPORT_VERSION}"
- curl ${CURL_OPTS} -o teleport.tar.gz https://get.gravitational.com/teleport/${TELEPORT_VERSION}/teleport-ent-v${TELEPORT_VERSION}-linux-amd64-bin.tar.gz
- tar -xzf teleport.tar.gz
- cp teleport-ent/tctl teleport-ent/tsh teleport-ent/teleport /usr/bin
- rm -rf /tmp/teleport.tar.gz /tmp/teleport-ent
+# Install the FIPS version of Teleport if /tmp/teleport-fips is present
+if [ -f /tmp/teleport-fips ]; then
+ echo "Installing Enterprise Teleport version ${TELEPORT_VERSION} with FIPS support"
+ curl ${CURL_OPTS} -o teleport.tar.gz https://get.gravitational.com/teleport/${TELEPORT_VERSION}/teleport-ent-v${TELEPORT_VERSION}-linux-amd64-fips-bin.tar.gz
+ tar -xzf teleport.tar.gz
+ cp teleport-ent/tctl teleport-ent/tsh teleport-ent/teleport /usr/bin
+ rm -rf /tmp/teleport.tar.gz /tmp/teleport-ent
+ # add --fips to 'teleport start' commands in FIPS mode
+ sed -i -E "s_ExecStart=/usr/bin/teleport start(.*)_ExecStart=/usr/bin/teleport start --fips\1_g" /etc/systemd/system/teleport*.service
+else
+ if [[ "${TELEPORT_TYPE}" == "oss" ]]; then
+ echo "Installing OSS Teleport version ${TELEPORT_VERSION}"
+ curl ${CURL_OPTS} -o teleport.tar.gz https://s3.amazonaws.com/clientbuilds.gravitational.io/teleport/${TELEPORT_VERSION}/teleport-v${TELEPORT_VERSION}-linux-amd64-bin.tar.gz
+ tar -xzf teleport.tar.gz
+ cp teleport/tctl teleport/tsh teleport/teleport /usr/bin
+ rm -rf /tmp/teleport.tar.gz /tmp/teleport
+ else
+ echo "Installing Enterprise Teleport version ${TELEPORT_VERSION}"
+ curl ${CURL_OPTS} -o teleport.tar.gz https://get.gravitational.com/teleport/${TELEPORT_VERSION}/teleport-ent-v${TELEPORT_VERSION}-linux-amd64-bin.tar.gz
+ tar -xzf teleport.tar.gz
+ cp teleport-ent/tctl teleport-ent/tsh teleport-ent/teleport /usr/bin
+ rm -rf /tmp/teleport.tar.gz /tmp/teleport-ent
+ fi
fi
popd
diff --git a/assets/marketplace/files/make-amis-public.sh b/assets/marketplace/files/make-amis-public.sh
index cdaaeaca9afe3..c332386e1386f 100755
--- a/assets/marketplace/files/make-amis-public.sh
+++ b/assets/marketplace/files/make-amis-public.sh
@@ -6,7 +6,7 @@ REGION_LIST="eu-west-1 us-east-1 us-east-2 us-west-2"
# Exit if oss/ent parameters not provided
if [[ "$1" == "" ]]; then
- echo "Usage: $(basename $0) [oss/ent]"
+ echo "Usage: $(basename $0) [oss/ent/ent-fips]"
exit 1
else
RUN_MODE="$1"
@@ -16,13 +16,23 @@ ABSPATH=$(readlink -f "$0")
SCRIPT_DIR=$(dirname "${ABSPATH}")
BUILD_DIR=$(readlink -f "${SCRIPT_DIR}/build")
+AMI_TAG="production"
+OUTFILE="amis"
+BUILD_TIMESTAMP_FILENAME="${RUN_MODE}_build_timestamp.txt"
+# Conditionally set variables for FIPS
+if [[ "${RUN_MODE}" == "ent-fips" ]]; then
+ AMI_TAG="production-fips"
+ OUTFILE="amis-fips"
+ BUILD_TIMESTAMP_FILENAME="ent_build_timestamp.txt"
+fi
+
# Remove existing AMI ID file if present
-if [ -f "${BUILD_DIR}/amis.txt" ]; then
- rm -f "${BUILD_DIR}/amis.txt"
+if [ -f "${BUILD_DIR}/${OUTFILE}.txt" ]; then
+ rm -f "${BUILD_DIR}/${OUTFILE}.txt"
fi
# Read build timestamp from file
-TIMESTAMP_FILE="${BUILD_DIR}/${RUN_MODE}_build_timestamp.txt"
+TIMESTAMP_FILE="${BUILD_DIR}/${BUILD_TIMESTAMP_FILENAME}"
if [ ! -f "${TIMESTAMP_FILE}" ]; then
echo 'Cannot find "${TIMESTAMP_FILE}"'
exit 1
@@ -31,19 +41,19 @@ BUILD_TIMESTAMP=$(<"${TIMESTAMP_FILE}")
# Write AMI ID for each region to AMI ID file
for REGION in ${REGION_LIST}; do
- aws ec2 describe-images --region ${REGION} --filters "Name=tag:BuildTimestamp,Values=${BUILD_TIMESTAMP}" "Name=tag:BuildType,Values=production" > "${BUILD_DIR}/${REGION}.json"
+ aws ec2 describe-images --region ${REGION} --filters "Name=tag:BuildTimestamp,Values=${BUILD_TIMESTAMP}" "Name=tag:BuildType,Values=${AMI_TAG}" > "${BUILD_DIR}/${REGION}.json"
AMI_ID=$(jq --raw-output '.Images[0].ImageId' "${BUILD_DIR}/${REGION}.json")
if [[ "${AMI_ID}" == "" || "${AMI_ID}" == "null" ]]; then
echo "Error: cannot get AMI ID for ${REGION}"
exit 2
fi
rm -f "${BUILD_DIR}/${REGION}.json"
- echo "${REGION}=${AMI_ID}" >> "${BUILD_DIR}/amis.txt"
+ echo "${REGION}=${AMI_ID}" >> "${BUILD_DIR}/${OUTFILE}.txt"
done
# Make each AMI public (set launchPermission to 'all')
for REGION in ${REGION_LIST}; do
- AMI_ID=$(grep ${REGION} "${BUILD_DIR}/amis.txt" | awk -F= '{print $2}')
+ AMI_ID=$(grep ${REGION} "${BUILD_DIR}/${OUTFILE}.txt" | awk -F= '{print $2}')
if [[ "${AMI_ID}" == "" || "${AMI_ID}" == "null" ]]; then
echo "Error: cannot get AMI ID for ${REGION}"
exit 3
@@ -51,4 +61,4 @@ for REGION in ${REGION_LIST}; do
aws ec2 modify-image-attribute --region ${REGION} --image-id ${AMI_ID} --launch-permission "Add=[{Group=all}]"
echo "AMI ID ${AMI_ID} for ${REGION} set to public"
fi
-done
+done
\ No newline at end of file
diff --git a/assets/marketplace/files/system/teleport-acm.service b/assets/marketplace/files/system/teleport-acm.service
index 18552b857efdc..e57d11dd81e3f 100644
--- a/assets/marketplace/files/system/teleport-acm.service
+++ b/assets/marketplace/files/system/teleport-acm.service
@@ -12,5 +12,5 @@ RestartSec=5
RuntimeDirectory=teleport
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid --insecure-no-tls
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
\ No newline at end of file
diff --git a/assets/marketplace/files/system/teleport-auth.service b/assets/marketplace/files/system/teleport-auth.service
index eac9d486592ae..17f6d85b5a67f 100644
--- a/assets/marketplace/files/system/teleport-auth.service
+++ b/assets/marketplace/files/system/teleport-auth.service
@@ -12,7 +12,7 @@ RestartSec=5
RuntimeDirectory=teleport
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/files/system/teleport-node.service b/assets/marketplace/files/system/teleport-node.service
index 881fca97cb459..b1f6d04cf4a2b 100644
--- a/assets/marketplace/files/system/teleport-node.service
+++ b/assets/marketplace/files/system/teleport-node.service
@@ -13,7 +13,7 @@ RuntimeDirectory=teleport
ExecStartPre=/usr/bin/teleport-ssm-get-token
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/files/system/teleport-proxy-acm.service b/assets/marketplace/files/system/teleport-proxy-acm.service
index 4d11e16083ff7..0dbc6f0e77f78 100644
--- a/assets/marketplace/files/system/teleport-proxy-acm.service
+++ b/assets/marketplace/files/system/teleport-proxy-acm.service
@@ -14,7 +14,7 @@ EnvironmentFile=/etc/teleport.d/conf
ExecStartPre=/usr/bin/teleport-ssm-get-token
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid --insecure-no-tls
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/files/system/teleport-proxy.service b/assets/marketplace/files/system/teleport-proxy.service
index 13db02de7eb58..d0786f8aca9f6 100644
--- a/assets/marketplace/files/system/teleport-proxy.service
+++ b/assets/marketplace/files/system/teleport-proxy.service
@@ -15,7 +15,7 @@ ExecStartPre=/usr/bin/teleport-ssm-get-token
ExecStartPre=/bin/aws s3 sync s3://${TELEPORT_S3_BUCKET}/live/${TELEPORT_DOMAIN_NAME} /var/lib/teleport
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/files/system/teleport.service b/assets/marketplace/files/system/teleport.service
index 10e401fcf268f..74d2fc5c7392e 100644
--- a/assets/marketplace/files/system/teleport.service
+++ b/assets/marketplace/files/system/teleport.service
@@ -13,7 +13,7 @@ RuntimeDirectory=teleport
ExecStartPre=/usr/bin/teleport-all-pre-start
ExecStart=/usr/bin/teleport start --config=/etc/teleport.yaml --diag-addr=127.0.0.1:3434 --pid-file=/run/teleport/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport/teleport.pid
+PIDFile=/run/teleport/teleport.pid
LimitNOFILE=65536
[Install]
diff --git a/assets/marketplace/single-ami.json b/assets/marketplace/single-ami.json
index d175e23e971a6..73a5b2a124f8e 100644
--- a/assets/marketplace/single-ami.json
+++ b/assets/marketplace/single-ami.json
@@ -42,7 +42,7 @@
"tags": {
"Name": "{{user `ami_name`}}",
"BuildTimestamp": "{{user `build_timestamp`}}",
- "BuildType": "{{user `build_type`}}"
+ "BuildType": "production"
},
"run_tags": {
"Name": "{{user `ami_name`}}"
@@ -53,6 +53,45 @@
"snapshot_tags": {
"Name": "{{user `ami_name`}}"
}
+ },{
+ "name": "teleport-aws-linux-fips",
+ "ami_description": "Gravitational Teleport with FIPS support using AWS Linux AMI for AWS Marketplace",
+ "type": "amazon-ebs",
+ "region": "{{user `aws_region`}}",
+ "source_ami_filter": {
+ "filters": {
+ "virtualization-type": "hvm",
+ "name": "amzn2-ami-hvm*-ebs",
+ "root-device-type": "ebs"
+ },
+ "owners": ["137112412989", "591542846629", "801119661308",
+ "102837901569", "013907871322", "206029621532",
+ "286198878708", "443319210888"],
+ "most_recent": true
+ },
+ "instance_type": "{{user `instance_type`}}",
+ "ssh_username": "ec2-user",
+ "ami_name": "{{user `fips_ami_name` | clean_ami_name}}",
+ "ssh_pty" : true,
+ "associate_public_ip_address": true,
+ "vpc_id": "{{user `vpc`}}",
+ "subnet_id": "{{user `subnet`}}",
+ "ami_regions": "{{user `destination_regions`}}",
+ "force_delete_snapshot": "true",
+ "tags": {
+ "Name": "{{user `fips_ami_name`}}",
+ "BuildTimestamp": "{{user `build_timestamp`}}",
+ "BuildType": "production-fips"
+ },
+ "run_tags": {
+ "Name": "{{user `fips_ami_name`}}"
+ },
+ "run_volume_tags": {
+ "Name": "{{user `fips_ami_name`}}"
+ },
+ "snapshot_tags": {
+ "Name": "{{user `fips_ami_name`}}"
+ }
},{
"name": "teleport-aws-linux-marketplace",
"ami_description": "Gravitational Teleport using AWS Linux AMI for AWS Marketplace",
@@ -109,6 +148,13 @@
],
"type": "shell"
},
+ {
+ "type": "shell",
+ "inline": [
+ "touch /tmp/teleport-fips"
+ ],
+ "only": ["teleport-aws-linux-fips"]
+ },
{
"type": "shell",
"script": "files/install.sh",
diff --git a/build.assets/Dockerfile-fips b/build.assets/Dockerfile-fips
new file mode 100644
index 0000000000000..c75c3bac0278f
--- /dev/null
+++ b/build.assets/Dockerfile-fips
@@ -0,0 +1,35 @@
+# This Dockerfile makes the FIPS "build box": the container used to build official
+# FIPS releases of Teleport and its documentation.
+FROM quay.io/gravitational/buildbox-base:1.0
+
+ARG UID
+ARG GID
+
+COPY pam/pam_teleport.so /lib/x86_64-linux-gnu/security
+COPY pam/teleport-acct-failure /etc/pam.d
+COPY pam/teleport-session-failure /etc/pam.d
+COPY pam/teleport-success /etc/pam.d
+
+RUN apt-get update; apt-get install -q -y libpam-dev libc6-dev-i386 net-tools tree
+
+RUN (groupadd jenkins --gid=$GID -o && useradd jenkins --uid=$UID --gid=$GID --create-home --shell=/bin/sh ;\
+ mkdir -p /var/lib/teleport && chown -R jenkins /var/lib/teleport)
+
+# Install etcd.
+RUN (curl -L https://github.com/coreos/etcd/releases/download/v3.3.9/etcd-v3.3.9-linux-amd64.tar.gz | tar -xz ;\
+ cp etcd-v3.3.9-linux-amd64/etcd* /bin/)
+
+# Install Go.
+ARG RUNTIME
+RUN mkdir -p /opt && cd /opt && curl https://go-boringcrypto.storage.googleapis.com/${RUNTIME}b4.linux-amd64.tar.gz | tar xz;\
+ mkdir -p /gopath/src/github.com/gravitational/teleport;\
+ chmod a+w /gopath;\
+ chmod a+w /var/lib;\
+ chmod a-w /
+
+ENV GOPATH="/gopath" \
+ GOROOT="/opt/go" \
+ PATH="$PATH:/opt/go/bin:/gopath/bin:/gopath/src/github.com/gravitational/teleport/build"
+
+VOLUME ["/gopath/src/github.com/gravitational/teleport"]
+EXPOSE 6600 2379 2380
diff --git a/build.assets/Makefile b/build.assets/Makefile
index 0c9cab3a511ac..490b468252b7b 100644
--- a/build.assets/Makefile
+++ b/build.assets/Makefile
@@ -2,6 +2,7 @@
# This Makefile is used for producing official Teleport releases
#
BBOX=teleport-buildbox:latest
+BBOXFIPS=teleport-buildbox-fips:latest
DOCSBOX=teleport-docsbox:latest
DOCSHOST=teleport-docs
@@ -53,6 +54,17 @@ bbox:
--build-arg RUNTIME=$(RUNTIME) \
--tag $(BBOX) .
+#
+# Builds a Docker buildbox for FIPS
+#
+.PHONY:bbox-fips
+bbox-fips:
+ docker build \
+ --build-arg UID=$$(id -u) \
+ --build-arg GID=$$(id -g) \
+ --build-arg RUNTIME=$(RUNTIME) \
+ --tag $(BBOXFIPS) -f Dockerfile-fips .
+
#
# Builds a Docker container for building mkdocs documentation
#
@@ -141,6 +153,16 @@ release: bbox
docker run $(DOCKERFLAGS) -i $(NOROOT) $(BBOX) \
/usr/bin/make release -e ADDFLAGS="$(ADDFLAGS)" OS=$(OS) ARCH=$(ARCH) RUNTIME=$(RUNTIME)
+#
+# Create a Teleport FIPS package using the build container.
+# This is a special case because it only builds and packages the Enterprise FIPS binaries, no OSS.
+#
+.PHONY:release-fips
+release-fips: bbox-fips
+ @if [ -z ${VERSION} ]; then echo "VERSION is not set"; exit 1; fi
+ docker run $(DOCKERFLAGS) -i $(NOROOT) $(BBOXFIPS) \
+ /usr/bin/make -C e release -e ADDFLAGS="$(ADDFLAGS)" OS=$(OS) ARCH=$(ARCH) RUNTIME=$(RUNTIME) FIPS=yes VERSION=$(VERSION) GITTAG=v$(VERSION)-fips
+
#
# Create a Windows Teleport package using the build container.
#
diff --git a/build.assets/charts/Dockerfile-fips b/build.assets/charts/Dockerfile-fips
new file mode 100644
index 0000000000000..481136182d8f1
--- /dev/null
+++ b/build.assets/charts/Dockerfile-fips
@@ -0,0 +1,23 @@
+FROM ubuntu:18.04
+
+# Install dumb-init and ca-certificate. The dumb-init package is to ensure
+# signals and orphaned processes are are handled correctly. The ca-certificate
+# package is installed because the base Ubuntu image does not come with any
+# certificate authorities.
+#
+# Note that /var/lib/apt/lists/* is cleaned up in the same RUN command as
+# "apt-get update" to reduce the size of the image.
+RUN apt-get update && apt-get upgrade -y && \
+ apt-get install --no-install-recommends -y \
+ dumb-init \
+ ca-certificates \
+ && update-ca-certificates \
+ && rm -rf /var/lib/apt/lists/*
+
+# Bundle "teleport", "tctl", and "tsh" binaries into image.
+ADD teleport /usr/local/bin/teleport
+ADD tctl /usr/local/bin/tctl
+ADD tsh /usr/local/bin/tsh
+
+# By setting this entry point, we expose make target as command.
+ENTRYPOINT ["/usr/bin/dumb-init", "teleport", "start", "-c", "/etc/teleport/teleport.yaml", "--fips"]
diff --git a/build.assets/pkg/etc/teleport.yaml b/build.assets/pkg/etc/teleport.yaml
index 23275dd7bbf40..7806bdf1c09c4 100644
--- a/build.assets/pkg/etc/teleport.yaml
+++ b/build.assets/pkg/etc/teleport.yaml
@@ -1,7 +1,5 @@
# By default, this file should be stored in /etc/teleport.yaml
-## IMPORTANT ##
-#When editing YAML configuration, please pay attention to how your editor handles white space. YAML requires consistent handling of tab characters
# This section of the configuration file applies to all teleport
# services.
teleport:
@@ -9,108 +7,274 @@ teleport:
# by default it's equal to hostname
nodename: graviton
- # Data directory where Teleport keeps its data, like keys/users for
- # authentication (if using the default BoltDB back-end)
- data_dir: /var/lib/teleport
+ # Data directory where Teleport daemon keeps its data.
+ # See "Filesystem Layout" section above for more details.
+ # data_dir: /var/lib/teleport
- # one-time invitation token used to join a cluster. it is not used on
+ # Invitation token used to join a cluster. It is not used on
# subsequent starts
auth_token: xxxx-token-xxxx
- # when running in multi-homed or NATed environments Teleport nodes need
+ # Optional CA pin of the auth server. This enables more secure way of adding new
+ # nodes to a cluster. See "Adding Nodes" section above.
+ # ca_pin: "sha256:7e12c17c20d9cb504bbcb3f0236be3f446861f1396dcbb44425fe28ec1c108f1"
+
+ # When running in multi-homed or NATed environments Teleport nodes need
# to know which IP it will be reachable at by other nodes
- #advertise_ip: 0.0.0.0
+ #
+ # This value can be specified as FQDN e.g. host.example.com
+ # advertise_ip: 10.1.0.5
# list of auth servers in a cluster. you will have more than one auth server
- # if you configure teleport auth to run in HA configuration
- auth_servers:
- - localhost:3025
+ # if you configure teleport auth to run in HA configuration.
+ # If adding a node located behind NAT, use the Proxy URL. e.g.
+ # auth_servers:
+ # - teleport-proxy.example.com:3080
+ auth_servers:
+ - 127.0.0.1:3025
# Teleport throttles all connections to avoid abuse. These settings allow
# you to adjust the default limits
- connection_limits:
- max_connections: 1000
- max_users: 250
+ # connection_limits:
+ # max_connections: 1000
+ # max_users: 250
- # Logging configuration. Possible output values are 'stdout', 'stderr' and
+ # Logging configuration. Possible output values are 'stdout', 'stderr' and
# 'syslog'. Possible severity values are INFO, WARN and ERROR (default).
- log:
- output: stderr
- severity: ERROR
+ # log:
+ # output: stderr
+ # severity: ERROR
- # Type of storage used for keys. You need to configure this to use etcd
- # backend if you want to run Teleport in HA configuration.
+ # Configuration for the storage back-end used for the cluster state and the
+ # audit log. Several back-end types are supported. See "High Availability"
+ # section of this Admin Manual below to learn how to configure DynamoDB,
+ # S3, etcd and other highly available back-ends.
storage:
- type: bolt
+ # By default teleport uses the `data_dir` directory on a local filesystem
+ type: dir
+
+ # Array of locations where the audit log events will be stored. by
+ # default they are stored in `/var/lib/teleport/log`
+ # audit_events_uri: ['file:///var/lib/teleport/log', 'dynamodb://events_table_name', 'stdout://']
+
+ # Use this setting to configure teleport to store the recorded sessions in
+ # an AWS S3 bucket. see "Using Amazon S3" chapter for more information.
+ # audit_sessions_uri: 's3://example.com/path/to/bucket?region=us-east-1'
+
+ # Cipher algorithms that the server supports. This section only needs to be
+ # set if you want to override the defaults.
+ # ciphers:
+ # - aes128-ctr
+ # - aes192-ctr
+ # - aes256-ctr
+ # - aes128-gcm@openssh.com
+ # - chacha20-poly1305@openssh.com
+
+ # Key exchange algorithms that the server supports. This section only needs
+ # to be set if you want to override the defaults.
+ # kex_algos:
+ # - curve25519-sha256@libssh.org
+ # - ecdh-sha2-nistp256
+ # - ecdh-sha2-nistp384
+ # - ecdh-sha2-nistp521
+
+ # Message authentication code (MAC) algorithms that the server supports.
+ # This section only needs to be set if you want to override the defaults.
+ # mac_algos:
+ # - hmac-sha2-256-etm@openssh.com
+ # - hmac-sha2-256
+
+ # List of the supported ciphersuites. If this section is not specified,
+ # only the default ciphersuites are enabled.
+ # ciphersuites:
+ # - tls-rsa-with-aes-128-gcm-sha256
+ # - tls-rsa-with-aes-256-gcm-sha384
+ # - tls-ecdhe-rsa-with-aes-128-gcm-sha256
+ # - tls-ecdhe-ecdsa-with-aes-128-gcm-sha256
+ # - tls-ecdhe-rsa-with-aes-256-gcm-sha384
+ # - tls-ecdhe-ecdsa-with-aes-256-gcm-sha384
+ # - tls-ecdhe-rsa-with-chacha20-poly1305
+ # - tls-ecdhe-ecdsa-with-chacha20-poly1305
+
# This section configures the 'auth service':
auth_service:
+ # Turns 'auth' role on. Default is 'yes'
enabled: yes
+
+ # A cluster name is used as part of a signature in certificates
+ # generated by this CA.
+ #
+ # We strongly recommend to explicitly set it to something meaningful as it
+ # becomes important when configuring trust between multiple clusters.
+ #
+ # By default an automatically generated name is used (not recommended)
+ #
+ # IMPORTANT: if you change cluster_name, it will invalidate all generated
+ # certificates and keys (may need to wipe out /var/lib/teleport directory)
+ # cluster_name: "main"
+
+ authentication:
+ # default authentication type. possible values are 'local', 'oidc' and 'saml'
+ # only local authentication (Teleport's own user DB) is supported in the open
+ # source version
+ type: local
+ # second_factor can be off, otp, or u2f
+ second_factor: otp
+ # this section is used if second_factor is set to 'u2f'
+ # u2f:
+ # app_id must point to the URL of the Teleport Web UI (proxy) accessible
+ # by the end users
+ # app_id: https://localhost:3080
+ # facets must list all proxy servers if there are more than one deployed
+ # facets:
+ # - https://localhost:3080
+
# IP and the port to bind to. Other Teleport nodes will be connecting to
- # this port (AKA "Auth API" or "Cluster API") to validate client
- # certificates
+ # this port (AKA "Auth API" or "Cluster API") to validate client
+ # certificates
listen_addr: 0.0.0.0:3025
+ # The optional DNS name of the auth server if located behind a load balancer.
+ # (see public_addr section below)
+ # public_addr: auth.example.com:3025
+
# Pre-defined tokens for adding new nodes to a cluster. Each token specifies
- # the role a new node will be allowed to assume. The more secure way to
- # add nodes is to use `ttl node add --ttl` command to generate auto-expiring
- # tokens.
+ # the role a new node will be allowed to assume. The more secure way to
+ # add nodes is to use `tctl nodes add --ttl` command to generate auto-expiring
+ # tokens.
#
# We recommend to use tools like `pwgen` to generate sufficiently random
# tokens of 32+ byte length.
- tokens:
- - "proxy,node:xxxxx"
- - "auth:yyyy"
+ # tokens:
+ # - "proxy,node:xxxxx"
+ # - "auth:yyyy"
+
+ # Optional setting for configuring session recording. Possible values are:
+ # "node" : sessions will be recorded on the node level (the default)
+ # "proxy" : recording on the proxy level, see "recording proxy mode" section.
+ # "off" : session recording is turned off
+ # session_recording: "node"
+
+ # This setting determines if a Teleport proxy performs strict host key checks.
+ # Only applicable if session_recording=proxy, see "recording proxy mode" for details.
+ # proxy_checks_host_keys: yes
+
+ # Determines if SSH sessions to cluster nodes are forcefully terminated
+ # after no activity from a client (idle client).
+ # Examples: "30m", "1h" or "1h30m"
+ # client_idle_timeout: never
+
+ # Determines if the clients will be forcefully disconnected when their
+ # certificates expire in the middle of an active SSH session. (default is 'no')
+ # disconnect_expired_cert: no
+
+ # Determines the interval at which Teleport will send keep-alive messages. The
+ # default value mirrors sshd at 15 minutes. keep_alive_count_max is the number
+ # of missed keep-alive messages before the server tears down the connection to the
+ # client.
+ # keep_alive_interval: 15
+ # keep_alive_count_max: 3
+
+ # License file to start auth server with. Note that this setting is ignored
+ # in open-source Teleport and is required only for Teleport Pro, Business
+ # and Enterprise subscription plans.
+ #
+ # The path can be either absolute or relative to the configured `data_dir`
+ # and should point to the license file obtained from Teleport Download Portal.
+ #
+ # If not set, by default Teleport will look for the `license.pem` file in
+ # the configured `data_dir`.
+ # license_file: /var/lib/teleport/license.pem
+
+ # DEPRECATED in Teleport 3.2 (moved to proxy_service section)
+ # kubeconfig_file: /path/to/kubeconfig
# This section configures the 'node service':
ssh_service:
+ # Turns 'ssh' role on. Default is 'yes'
enabled: yes
- # IP and the port for SSH service to bind to.
+
+ # IP and the port for SSH service to bind to.
listen_addr: 0.0.0.0:3022
+
+ # The optional public address of the SSH service. This is useful if administrators
+ # want to allow users to connect to nodes directly, bypassing a Teleport proxy
+ # (see public_addr section below)
+ # public_addr: node.example.com:3022
+
# See explanation of labels in "Labeling Nodes" section below
- labels:
- role: master
- type: postgres
- # List (YAML array) of commands to periodically execute and use
- # their output as labels.
- # See explanation of how this works in "Labeling Nodes" section below
+ # labels:
+ # role: master
+ # type: postgres
+
+ # List of the commands to periodically execute. Their output will be used as node labels.
+ # See "Labeling Nodes" section below for more information and more examples.
commands:
- - name: hostname
- command: [/usr/bin/hostname]
- period: 1m0s
+ # this command will add a label 'arch=x86_64' to a node
- name: arch
- command: [/usr/bin/uname, -p]
+ command: ['/bin/uname', '-p']
period: 1h0m0s
-# This section configures the 'proxy servie'
+ # enables reading ~/.tsh/environment before creating a session. by default
+ # set to false, can be set true here or as a command line flag.
+ # permit_user_env: false
+
+ # configures PAM integration. see below for more details.
+ # pam:
+ # enabled: no
+ # service_name: teleport
+
+# This section configures the 'proxy service'
proxy_service:
+ # Turns 'proxy' role on. Default is 'yes'
enabled: yes
+
# SSH forwarding/proxy address. Command line (CLI) clients always begin their
# SSH sessions by connecting to this port
listen_addr: 0.0.0.0:3023
- # Reverse tunnel listening address. An auth server (CA) can establish an
- # outbound (from behind the firewall) connection to this address.
- # This will allow users of the outside CA to connect to behind-the-firewall
+ # Reverse tunnel listening address. An auth server (CA) can establish an
+ # outbound (from behind the firewall) connection to this address.
+ # This will allow users of the outside CA to connect to behind-the-firewall
# nodes.
- tunnel_listen_addr: 0.0.0.0:3024
-
- # List (array) of other clusters this CA trusts.
- trusted_clusters:
- - key_file: /path/to/main-cluster.ca
- # Comma-separated list of OS logins allowed to users of this
- # trusted cluster
- allow_logins: john,root
- # Establishes a reverse SSH tunnel from this cluster to the trusted
- # cluster, allowing the trusted cluster users to access nodes of this
- # cluster
- #tunnel_addr: 80.10.0.12:3024
-
- # The HTTPS listen address to serve the Web UI and also to authenticate the
+ # tunnel_listen_addr: 0.0.0.0:3024
+
+ # The HTTPS listen address to serve the Web UI and also to authenticate the
# command line (CLI) users via password+HOTP
web_listen_addr: 0.0.0.0:3080
- # TLS certificate for the HTTPS connection. Configuring these properly is
+ # The DNS name of the proxy HTTPS endpoint as accessible by cluster users.
+ # Defaults to the proxy's hostname if not specified. If running multiple
+ # proxies behind a load balancer, this name must point to the load balancer
+ # (see public_addr section below)
+ # public_addr: proxy.example.com:3080
+
+ # The DNS name of the proxy SSH endpoint as accessible by cluster clients.
+ # Defaults to the proxy's hostname if not specified. If running multiple proxies
+ # behind a load balancer, this name must point to the load balancer.
+ # Use a TCP load balancer because this port uses SSH protocol.
+ # ssh_public_addr: proxy.example.com:3023
+
+ # TLS certificate for the HTTPS connection. Configuring these properly is
# critical for Teleport security.
- https_key_file: /etc/teleport/teleport.key
- https_cert_file: /etc/teleport/teleport.crt
+ # https_key_file: /var/lib/teleport/webproxy_key.pem
+ # https_cert_file: /var/lib/teleport/webproxy_cert.pem
+
+ # This section configures the Kubernetes proxy service
+ # kubernetes:
+ # Turns 'kubernetes' proxy on. Default is 'no'
+ # enabled: yes
+
+ # Kubernetes proxy listen address.
+ # listen_addr: 0.0.0.0:3026
+
+ # The DNS name of the Kubernetes proxy server that is accessible by cluster clients.
+ # If running multiple proxies behind a load balancer, this name must point to the
+ # load balancer.
+ # public_addr: ['kube.example.com:3026']
+
+ # This setting is not required if the Teleport proxy service is
+ # deployed inside a Kubernetes cluster. Otherwise, Teleport proxy
+ # will use the credentials from this file:
+ # kubeconfig_file: /path/to/kube/config
diff --git a/build.assets/pkg/init-scripts/systemd/system/teleport.service b/build.assets/pkg/init-scripts/systemd/system/teleport.service
index ba386b9762af8..9f0c9d5edffb6 100644
--- a/build.assets/pkg/init-scripts/systemd/system/teleport.service
+++ b/build.assets/pkg/init-scripts/systemd/system/teleport.service
@@ -1,11 +1,13 @@
[Unit]
Description=Teleport SSH Service
-After=network.target
+After=network.target
[Service]
Type=simple
-Restart=always
-ExecStart=/usr/bin/teleport start
+Restart=on-failure
+ExecStart=/usr/local/bin/teleport start --pid-file=/run/teleport.pid
+ExecReload=/bin/kill -HUP $MAINPID
+PIDFile=/run/teleport.pid
[Install]
WantedBy=multi-user.target
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 569c1cc8b6d7b..c19f620ee591e 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -13,7 +13,7 @@ RUN apt-get update; apt-get install -y htop vim screen; \
# allows ansible and ssh testing
RUN apt-get install -y ansible ssh inetutils-syslogd
-RUN mkdir /var/run/sshd
+RUN mkdir /run/sshd
VOLUME ["/teleport", "/var/lib/teleport"]
COPY .bashrc /root/.bashrc
diff --git a/docs/1.3/admin-guide.md b/docs/1.3/admin-guide.md
index fcb64f5638845..c9a4e65b98bce 100644
--- a/docs/1.3/admin-guide.md
+++ b/docs/1.3/admin-guide.md
@@ -496,7 +496,7 @@ user "john" to have access to them. We already have our primary Teleport cluster
users set up. Say this primary cluster is called `main`, and behind-the-firewall cluster
is called `cluster-b` as shown on this diagram:
-
+
This setup works as follows:
diff --git a/docs/2.0/admin-guide.md b/docs/2.0/admin-guide.md
index dcd3a22310c43..0af69a0c30269 100644
--- a/docs/2.0/admin-guide.md
+++ b/docs/2.0/admin-guide.md
@@ -747,7 +747,7 @@ user "john" to have access to them. We already have our primary Teleport cluster
users set up. Say this primary cluster is called `main`, and the behind-the-firewall cluster
is called `cluster-b` as shown on this diagram:
-
+
This setup works as follows:
diff --git a/docs/2.3/admin-guide.md b/docs/2.3/admin-guide.md
index 6c1fb732bd680..ddf672ce1cba1 100644
--- a/docs/2.3/admin-guide.md
+++ b/docs/2.3/admin-guide.md
@@ -856,7 +856,7 @@ user "john" to have access to them. We already have our primary Teleport cluster
users set up. Say this primary cluster is called `main`, and the behind-the-firewall cluster
is called `east` as shown on this diagram:
-
+
This setup works as follows:
diff --git a/docs/2.4/admin-guide.md b/docs/2.4/admin-guide.md
index ec9b81f02239a..100b39fbbf5da 100644
--- a/docs/2.4/admin-guide.md
+++ b/docs/2.4/admin-guide.md
@@ -979,7 +979,8 @@ Resource Kind | Description
-------------------|--------------
user | A user record in the internal Teleport user DB.
node | A registered SSH node. The same record is displayed via `tctl nodes ls`
-trusted_cluster | A trusted cluster. See [here](#trusted-clusters) for more details on connecting clusters together.
+trusted_cluster | A trusted cluster. See [here](#trusted-clusters) for more details on connecting clusters together.
+
role | A role assumed by users. The open source Teleport only includes one role: "admin", but Enterprise teleport users can define their own roles.
github | A Github auth connector. See [here](#github-auth-connector) for details on configuring it.
@@ -997,7 +998,7 @@ user "john" to have access to them. We already have our primary Teleport cluster
users set up. Say this primary cluster is called `main`, and the behind-the-firewall cluster
is called `east` as shown on this diagram:
-
+
This setup works as follows:
diff --git a/docs/4.0.yaml b/docs/4.0.yaml
index 22185047b0c5e..5767041f28874 100644
--- a/docs/4.0.yaml
+++ b/docs/4.0.yaml
@@ -6,8 +6,8 @@ site_author: Gravitational Inc
copyright: Gravitational Inc, 2016-19
# output directory:
-site_dir: ../build/docs/4.0
-docs_dir: "4.0"
+site_dir: ../build/docs/4.1
+docs_dir: "4.1"
theme: readthedocs
theme_dir: theme
@@ -19,7 +19,7 @@ markdown_extensions:
extra_css: []
extra_javascript: []
extra:
- version: 4.0
+ version: 4.1
pages:
- Documentation:
- Introduction: intro.md
diff --git a/docs/4.0/admin-guide.md b/docs/4.0/admin-guide.md
index c2b839a553906..febae4dd6cccf 100644
--- a/docs/4.0/admin-guide.md
+++ b/docs/4.0/admin-guide.md
@@ -106,9 +106,9 @@ After=network.target
[Service]
Type=simple
Restart=on-failure
-ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --pid-file=/var/run/teleport.pid
+ExecStart=/usr/local/bin/teleport start --config=/etc/teleport.yaml --pid-file=/run/teleport.pid
ExecReload=/bin/kill -HUP $MAINPID
-PIDFile=/var/run/teleport.pid
+PIDFile=/run/teleport.pid
[Install]
WantedBy=multi-user.target
@@ -1389,7 +1389,7 @@ world usage examples of this capability include:
Let's take a look at how a connection is established between the "main" cluster and the "east" cluster:
-
+
This setup works as follows:
@@ -1436,7 +1436,7 @@ The cluster invite token: generated-token-to-add-new-clusters
**Using a Cluster Join Token**
-Now, the administrator of "east" must create the following resource file:
+Now, the administrator of "east (leaf)" must create the following resource file:
```yaml
# cluster.yaml
@@ -1445,7 +1445,7 @@ version: v2
metadata:
# the trusted cluster name MUST match the 'cluster_name' setting of the
# cluster
- name: main
+ name: east
spec:
# this field allows to create tunnels that are disabled, but can be enabled later.
enabled: true
@@ -1483,7 +1483,7 @@ list of available clusters.
### Using Trusted Clusters
-As mentioned above, accessibility is only granted in one direction. So, only users from the "main" (trusted cluster) can now access nodes in the "east" (trusting cluster). Users in the "east" cluster will not be able to access the "main" cluster.
+As mentioned above, accessibility is only granted in one direction. So, only users from the "main" (root cluster) can now access nodes in the "east" (leaf cluster). Users in the "east" cluster will not be able to access the "main" cluster.
```bsh
# login into the main cluster:
diff --git a/docs/4.0/img/trusted-clusters/Teleport-Cluster-switcher.png b/docs/4.0/img/trusted-clusters/Teleport-Cluster-switcher.png
new file mode 100644
index 0000000000000..d05eb3e7dd6f9
Binary files /dev/null and b/docs/4.0/img/trusted-clusters/Teleport-Cluster-switcher.png differ
diff --git a/docs/4.0/img/trusted-clusters/TrustedClusters-MSP.svg b/docs/4.0/img/trusted-clusters/TrustedClusters-MSP.svg
new file mode 100644
index 0000000000000..610571470b737
--- /dev/null
+++ b/docs/4.0/img/trusted-clusters/TrustedClusters-MSP.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/4.0/img/trusted-clusters/TrustedClusters-Simple.svg b/docs/4.0/img/trusted-clusters/TrustedClusters-Simple.svg
new file mode 100644
index 0000000000000..8f90111168817
--- /dev/null
+++ b/docs/4.0/img/trusted-clusters/TrustedClusters-Simple.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/4.0/img/trusted-clusters/setting-up-trust.png b/docs/4.0/img/trusted-clusters/setting-up-trust.png
new file mode 100644
index 0000000000000..f3facf19b54a2
Binary files /dev/null and b/docs/4.0/img/trusted-clusters/setting-up-trust.png differ
diff --git a/docs/4.0/trustedclusters.md b/docs/4.0/trustedclusters.md
index 9169fe38d9ba4..f2e93622251b6 100644
--- a/docs/4.0/trustedclusters.md
+++ b/docs/4.0/trustedclusters.md
@@ -1,5 +1,17 @@
# Trusted Clusters
+The design of trusted clusters allows Teleport users to connect to compute infrastructure
+located behind firewalls without any open TCP ports. The real world usage examples of this
+capability include:
+
+ - Managed service providers (MSP) remotely managing infrastructure of their clients.
+ - Device manufacturers remotely maintaining computing appliances deployed on premises.
+ - Large cloud software vendors manage multiple data centers using a common proxy.
+
+**Example of an MSP provider using trusted clusters to obtain access to clients' clusters.**
+
+
+
If you haven't already looked at the introduction to [Trusted Clusters](admin-guide.md#trusted-clusters)
in the Admin Guide we recommend you review that for an overview before continuing with this guide.
@@ -33,15 +45,15 @@ _proxy server_. So, if users want to connect to nodes belonging to different
clusters, they would normally have to use different `--proxy` flags for each
cluster. This is not always convenient.
-The concept of _trusted clusters_ allows Teleport administrators to connect
+The concept of _leaf clusters_ allows Teleport administrators to connect
multiple clusters together and establish trust between them. Trusted clusters
-allow users of one cluster to seamlessly SSH into the nodes of another cluster
-without having to "hop" between proxy servers. Moreover, users don't even need
-to have a direct connection to other clusters' proxy servers. The user
+allow users of one cluster, the root cluster, to seamlessly SSH into the nodes of
+another cluster without having to "hop" between proxy servers. Moreover, users don't
+even need to have a direct connection to other clusters' proxy servers. The user
experience looks like this:
```bsh
-# login using the "main" cluster credentials:
+# login using the root "main" cluster credentials:
$ tsh login --proxy=main.example.com
# SSH into some host inside the "main" cluster:
@@ -55,17 +67,20 @@ $ tsh ssh --cluster=east host
$ tsh clusters
```
-Trusted clusters also have their own restrictions on user access, i.e.
+Leaf clusters also have their own restrictions on user access, i.e.
_permissions mapping_ takes place.
+**Once connection has been established it's easy to switch from the "main" root cluster**
+
+
## Join Tokens
Lets start with the diagram of how connection between two clusters is established:
-
+
The first step in establishing a secure tunnel between two clusters is for the
-_trusting_ cluster "east" to connect to the _trusted_ cluster "main". When this
+_leaf_ cluster "east" to connect to the _root_ cluster "main". When this
happens for _the first time_, clusters know nothing about each other, thus a
shared secret needs to exist in order for "main" to accept the connection from
"east".
@@ -74,12 +89,14 @@ This shared secret is called a "join token". There are two ways to create join
tokens: to statically define them in a configuration file, or to create them on
the fly using `tctl` tool.
+
+
!!! tip "Important":
It is important to realize that join tokens are only used to establish the
connection for the first time. The clusters will exchange certificates and
won't be using the token to re-establish the connection in the future.
-### Static Tokens
+### Static Join Tokens
To create a static join token, update the configuration file on "main" cluster
to look like this:
@@ -94,7 +111,7 @@ auth_service:
This token can be used unlimited number of times.
-### Dynamic Tokens
+### Dynamic Join Tokens
Creating a token dynamically with a CLI tool offers the advantage of applying a
time to live (TTL) interval on it, i.e. it will be impossible to re-use such
@@ -219,6 +236,17 @@ role name and use reference it to name the local role:
**NOTE:** The regexp matching is activated only when the expression starts
with `^` and ends with `$`
+
+### Trusted Cluster UI
+Customers using Teleport Enterprise can easily configure _leaf_ nodes using the
+Teleport Proxy UI.
+
+**Creating Trust from the Leaf node to the root node.**
+
+
+**The root cluster showing the cluster switching UI.**
+
+
## Updating Trusted Cluster Role Map
In order to update the role map for a trusted cluster first we will need to remove the cluster by executing:
@@ -240,7 +268,7 @@ Now an admin from the main cluster can now see and access the "east" cluster:
```bsh
# login into the main cluster:
-$ tsh --proxy=proxy.main login admin
+$ tsh --proxy=main.example.com login admin
```
```bsh
@@ -304,8 +332,8 @@ certificate authority. A certificate contains four important pieces of data:
Try executing `tsh status` right after `tsh login` to see all these fields in the
client certificate.
-When a user from "main" tries to connect to a node inside "east" cluster, her
-certificate is presented to the auth server of "east" and it performs the
+When a user from "main (root)" tries to connect to a node inside "east (leaf)" cluster, her
+certificate is presented to the auth server of "east (leaf)" and it performs the
following checks:
* Checks that the certificate signature matches one of the trusted clusters.
diff --git a/docs/4.1.yaml b/docs/4.1.yaml
index b2c5f7eda3f0f..781223ecbd925 100644
--- a/docs/4.1.yaml
+++ b/docs/4.1.yaml
@@ -26,15 +26,9 @@ pages:
- Documentation:
- Introduction: intro.md
- Quick Start Guide: quickstart.md
- - Architecture: architecture.md
- User Manual: user-manual.md
- Admin Manual: admin-guide.md
- FAQ: faq.md
- - Teleport Enterprise:
- - Introduction: enterprise.md
- - Quick Start Guide: quickstart-enterprise.md
- - RBAC: ssh_rbac.md
- - Single sign-on (SSO): ssh_sso.md
- Guides:
- AWS: aws_oss_guide.md
- Okta: ssh_okta.md
@@ -44,3 +38,14 @@ pages:
- OIDC: oidc.md
- Trusted Clusters: trustedclusters.md
- Kubernetes Guide: kubernetes_ssh.md
+ - Architecture:
+ - Architecture Overview: architecture/overview.md
+ - Teleport Users: architecture/users.md
+ - Teleport Nodes: architecture/nodes.md
+ - Teleport Auth: architecture/auth.md
+ - Teleport Proxy: architecture/proxy.md
+ - Enterprise Guides:
+ - Introduction: enterprise.md
+ - Quick Start Guide: quickstart-enterprise.md
+ - RBAC: ssh_rbac.md
+ - Single sign-on (SSO): ssh_sso.md
diff --git a/docs/4.1/admin-guide.md b/docs/4.1/admin-guide.md
index c2b839a553906..09076effd07c4 100644
--- a/docs/4.1/admin-guide.md
+++ b/docs/4.1/admin-guide.md
@@ -1,13 +1,14 @@
# Teleport Admin Manual
-This manual covers the installation and configuration of Teleport and the ongoing
-management of a Teleport cluster. It assumes that the reader has good understanding
-of Linux administration.
+This manual covers the installation and configuration of Teleport and the
+ongoing management of a Teleport cluster. It assumes that the reader has good
+understanding of Linux administration.
## Installing
-To install, download the official binaries from the [Teleport Downloads](https://gravitational.com/teleport/download/)
-section on our web site and run:
+To install, download the official binaries from the [Teleport
+Downloads](https://gravitational.com/teleport/download/) section on our web site
+and run:
```
$ tar -xzf teleport-binary-release.tar.gz
@@ -19,7 +20,7 @@ $ sudo make install
Gravitational Teleport is written in Go language. It requires Golang v1.8.3 or
newer.
-```bash
+``` bash
# get the source & build:
$ mkdir -p $GOPATH/src/github.com/gravitational
$ cd $GOPATH/src/github.com/gravitational
@@ -33,50 +34,53 @@ $ sudo mkdir -p /var/lib/teleport
### Teleport Checksum
-Gravitational Teleport provides a checksum from the Downloads page. This can be used to
-verify the integrity of our binary.
+Gravitational Teleport provides a checksum from the Downloads page. This can be
+used to verify the integrity of our binary.

**Checking Checksum on Mac OS**
-```bash
+
+``` bash
$ shasum -a 256 teleport-v4.0.8-darwin-amd64-bin.tar.gz
0826a17b440ac20d4c38ade3d0a5eb1c62a00c4d5eb88e60b5ea627d426aaed2 teleport-v4.0.8-darwin-amd64-bin.tar.gz
```
**Checking Checksum on Linux**
-```bash
+
+``` bash
$ sha256sum teleport-v4.0.8-darwin-amd64-bin.tar.gz
0826a17b440ac20d4c38ade3d0a5eb1c62a00c4d5eb88e60b5ea627d426aaed2 teleport-v4.0.8-darwin-amd64-bin.tar.gz
```
**Checking Checksum on Automated Systems**
-If you download Teleport via an automated system, you can programmatically obtain the checksum
-by adding `.sha256` to the binary.
+If you download Teleport via an automated system, you can programmatically
+obtain the checksum by adding `.sha256` to the binary.
-```bash
+``` bash
$ curl https://get.gravitational.com/teleport-v4.0.8-darwin-amd64-bin.tar.gz.sha256
0826a17b440ac20d4c38ade3d0a5eb1c62a00c4d5eb88e60b5ea627d426aaed2 teleport-v4.0.8-darwin-amd64-bin.tar.gz
```
-
## Definitions
-Before diving into configuring and running Teleport, it helps to take a look at the [Teleport Architecture](/architecture)
-and review the key concepts this document will be referring to:
+Before diving into configuring and running Teleport, it helps to take a look at
+the [Teleport Architecture](/architecture) and review the key concepts this
+document will be referring to:
|Concept | Description
|----------|------------
-|Node | Synonym to "server" or "computer", something one can "SSH to". A node must be running the `teleport` daemon with "node" role/service turned on.
+|Node | Synonym to "server" or "computer", something one can "SSH to". A node must be running the [ `teleport` ](../cli-docs/#teleport) daemon with "node" role/service turned on.
|Certificate Authority (CA) | A pair of public/private keys Teleport uses to manage access. A CA can sign a public key of a user or node, establishing their cluster membership.
|Teleport Cluster | A Teleport Auth Service contains two CAs. One is used to sign user keys and the other signs node keys. A collection of nodes connected to the same CA is called a "cluster".
-|Cluster Name | Every Teleport cluster must have a name. If a name is not supplied via `teleport.yaml` configuration file, a GUID will be generated. **IMPORTANT:** renaming a cluster invalidates its keys and all certificates it had created.
+|Cluster Name | Every Teleport cluster must have a name. If a name is not supplied via `teleport.yaml` configuration file, a GUID will be generated. **IMPORTANT:** renaming a cluster invalidates its keys and all certificates it had created.
|Trusted Cluster | Teleport Auth Service can allow 3rd party users or nodes to connect if their public keys are signed by a trusted CA. A "trusted cluster" is a pair of public keys of the trusted CA. It can be configured via `teleport.yaml` file.
## Teleport Daemon
-The Teleport daemon is called `teleport` and it supports the following commands:
+The Teleport daemon is called [ `teleport` ](./cli-docs/#teleport) and it supports
+the following commands:
|Command | Description
|------------|-------------------------------------------------------
@@ -86,19 +90,21 @@ The Teleport daemon is called `teleport` and it supports the following commands:
|status | Shows the status of a Teleport connection. This command is only available from inside of an active SSH session.
|help | Shows help.
-When experimenting, you can quickly start `teleport` with verbose logging by typing
-`teleport start -d`.
+When experimenting, you can quickly start [ `teleport` ](../cli-docs/#teleport)
+with verbose logging by typing [ `teleport start -d` ](./cli-docs/#teleport-start)
+.
-!!! danger "WARNING":
- Teleport stores data in `/var/lib/teleport`. Make sure that regular/non-admin users do not
- have access to this folder on the Auth server.
+!!! danger "WARNING"
+ Teleport stores data in `/var/lib/teleport` . Make sure that
+ regular/non-admin users do not have access to this folder on the Auth
+ server.
### Systemd Unit File
In production, we recommend starting teleport daemon via an init system like
-`systemd`. Here's the recommended Teleport service unit file for systemd:
+`systemd` . Here's the recommended Teleport service unit file for systemd:
-```yaml
+``` yaml
[Unit]
Description=Teleport SSH Service
After=network.target
@@ -116,28 +122,30 @@ WantedBy=multi-user.target
### Graceful Restarts
-If using the systemd service unit file above, executing `systemctl reload teleport`
-will perform a graceful restart, i.e. the Teleport daemon will fork a new
-process to handle new incoming requests, leaving the old daemon process running
-until existing clients disconnect.
+If using the systemd service unit file above, executing `systemctl reload
+teleport` will perform a graceful restart, i.e. the Teleport daemon will fork a
+new process to handle new incoming requests, leaving the old daemon process
+running until existing clients disconnect.
!!! warning "Version warning":
- Graceful restarts only work if Teleport is deployed using network-based storage
- like DynamoDB or etcd 3.3+. Future versions of Teleport will not have this limitation.
+ Graceful restarts only work if Teleport is
+ deployed using network-based storage like DynamoDB or etcd 3.3+. Future
+ versions of Teleport will not have this limitation.
-You can also perform restarts/upgrades by sending `kill` signals
-to a Teleport daemon manually.
+You can also perform restarts/upgrades by sending `kill` signals to a Teleport
+daemon manually.
| Signal | Teleport Daemon Behavior
|-------------------------|---------------------------------------
-| `USR1` | Dumps diagnostics/debugging information into syslog.
-| `TERM`, `INT` or `KILL` | Immediate non-graceful shutdown. All existing connections will be dropped.
-| `USR2` | Forks a new Teleport daemon to serve new connections.
-| `HUP` | Forks a new Teleport daemon to serve new connections **and** initiates the graceful shutdown of the existing process when there are no more clients connected to it.
+| `USR1` | Dumps diagnostics/debugging information into syslog.
+| `TERM` , `INT` or `KILL` | Immediate non-graceful shutdown. All existing connections will be dropped.
+| `USR2` | Forks a new Teleport daemon to serve new connections.
+| `HUP` | Forks a new Teleport daemon to serve new connections **and** initiates the graceful shutdown of the existing process when there are no more clients connected to it.
### Ports
-Teleport services listen on several ports. This table shows the default port numbers.
+Teleport services listen on several ports. This table shows the default port
+numbers.
|Port | Service | Description
|----------|------------|-------------------------------------------
@@ -150,91 +158,35 @@ Teleport services listen on several ports. This table shows the default port num
### Filesystem Layout
-By default, a Teleport node has the following files present. The location of all of them is configurable.
+By default, a Teleport node has the following files present. The location of all
+of them is configurable.
-Full path | Purpose
------------------------------|---------------------------
-`/etc/teleport.yaml` | Teleport configuration file (optional).
-`/usr/local/bin/teleport` | Teleport daemon binary.
-`/usr/local/bin/tctl` | Teleport admin tool. It is only needed for auth servers.
-`/var/lib/teleport` | Teleport data directory. Nodes keep their keys and certificates there. Auth servers store the audit log and the cluster keys there, but the audit log storage can be further configured via `auth_service` section in the config file.
+| Full path | Purpose |
+|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `/etc/teleport.yaml` | Teleport configuration file (optional).|
+| `/usr/local/bin/teleport` | Teleport daemon binary.|
+| `/usr/local/bin/tctl` | Teleport admin tool. It is only needed for auth servers.|
+| `/var/lib/teleport` | Teleport data directory. Nodes keep their keys and certificates there. Auth servers store the audit log and the cluster keys there, but the audit log storage can be further configured via `auth_service` section in the config file.|
## Configuration
-You should use a [configuration file](#configuration-file) to configure the `teleport` daemon.
-For simple experimentation, you can use command line flags with the `teleport start`
-command:
-
-```bsh
-$ teleport start --help
-usage: teleport start []
-
-Starts the Teleport service.
-
-Flags:
- -d, --debug Enable verbose logging to stderr
- --insecure-no-tls Disable TLS for the web socket
- -r, --roles Comma-separated list of roles to start with [proxy,node,auth]
- --pid-file Full path to the PID file. By default no PID file will be created
- --advertise-ip IP to advertise to clients if running behind NAT
- -l, --listen-ip IP address to bind to [0.0.0.0]
- --auth-server Address of the auth server [127.0.0.1:3025]
- --token Invitation token to register with an auth server [none]
- --ca-pin CA pin to validate the Auth Server
- --nodename Name of this node, defaults to hostname
- -c, --config Path to a configuration file [/etc/teleport.yaml]
- --labels List of labels for this node
- --insecure Insecure mode disables certificate validation
- --fips Start Teleport in FedRAMP/FIPS 140-2 mode.
-```
-
-### Configuration Flags
-
-Let's cover some of these flags in more detail:
-
-* `--insecure-no-tls` flag tells Teleport proxy to not generate default self-signed TLS
- certificates. This is useful when running Teleport on kubernetes (behind reverse
- proxy) or behind things like AWS ELBs, GCP LBs or Azure Load Balancers where SSL termination
- is provided externally.
- The possible values are `true` or `false`. The default value is `false`.
-
-* `--roles` flag tells Teleport which services to start. It is a comma-separated
- list of roles. The possible values are `auth`, `node` and `proxy`. The default
- value is `auth,node,proxy`. These roles are explained in the
- [Teleport Architecture](architecture.md) document.
-
-* `--advertise-ip` flag can be used when Teleport nodes are running behind NAT and
- their externally routable IP cannot be automatically determined.
- For example, assume that a host "foo" can be reached via `10.0.0.10` but there is
- no `A` DNS record for "foo", so you cannot connect to it via `tsh ssh foo`. If
- you start teleport on "foo" with `--advertise-ip=10.0.0.10`, it will automatically
- tell Teleport proxy to use that IP when someone tries to connect
- to "foo". This is also useful when connecting to Teleport nodes using their labels.
-
-* `--nodename` flag lets you assign an alternative name for the node which can be used
- by clients to login. By default it's equal to the value returned by `hostname`
- command.
-
-* `--listen-ip` should be used to tell `teleport` daemon to bind to a specific network
- interface. By default it listens on all.
-
-* `--labels` flag assigns a set of labels to a node. See the explanation
- of labeling mechanism in the [Labeling Nodes](#labeling-nodes) section below.
-
-* `--pid-file` flag creates a PID file if a path is given.
-
-* `--permit-user-env` flag reads in environment variables from `~/.tsh/environment`
- when creating a session.
+You should use a [configuration file](#configuration-file) to configure the
+[ `teleport` ](../cli-docs/#teleport) daemon. For simple experimentation, you can
+use command line flags with the [ `teleport start` ](./cli-docs/#teleport-start)
+command. Read about all the allowed flags in the [CLI
+Docs](./cli-docs/#teleport-start) or run `teleport start --help` .
### Configuration File
-Teleport uses the YAML file format for configuration. A sample configuration file is shown below. By default, it is stored in `/etc/teleport.yaml`
+Teleport uses the YAML file format for configuration. A sample configuration
+file is shown below. By default, it is stored in `/etc/teleport.yaml`
!!! note "IMPORTANT":
- When editing YAML configuration, please pay attention to how your editor
- handles white space. YAML requires consistent handling of tab characters.
+ When editing YAML configuration, please pay attention to how your
+ editor handles white space. YAML requires consistent handling of
+ tab characters.
-```yaml
+``` yaml
# By default, this file should be stored in /etc/teleport.yaml
# This section of the configuration file applies to all teleport
@@ -264,10 +216,11 @@ teleport:
# list of auth servers in a cluster. you will have more than one auth server
# if you configure teleport auth to run in HA configuration.
- # If adding a node located behind NAT, use the Proxy URL. e.g.
+ # If adding a node located behind NAT, use the Proxy URL. e.g.
# auth_servers:
# - teleport-proxy.example.com:3080
auth_servers:
+
- 10.1.0.5:3025
- 10.1.0.6:3025
@@ -285,8 +238,8 @@ teleport:
# Configuration for the storage back-end used for the cluster state and the
# audit log. Several back-end types are supported. See "High Availability"
- # section of this Admin Manual below to learn how to configure DynamoDB,
- # S3, etcd and other highly available back-ends.
+ # section of this Admin Manual below to learn how to configure DynamoDB,
+ # S3, etcd and other highly available back-ends.
storage:
# By default teleport uses the `data_dir` directory on a local filesystem
type: dir
@@ -302,6 +255,7 @@ teleport:
# Cipher algorithms that the server supports. This section only needs to be
# set if you want to override the defaults.
ciphers:
+
- aes128-ctr
- aes192-ctr
- aes256-ctr
@@ -311,6 +265,7 @@ teleport:
# Key exchange algorithms that the server supports. This section only needs
# to be set if you want to override the defaults.
kex_algos:
+
- curve25519-sha256@libssh.org
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
@@ -319,6 +274,7 @@ teleport:
# Message authentication code (MAC) algorithms that the server supports.
# This section only needs to be set if you want to override the defaults.
mac_algos:
+
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-256
@@ -334,7 +290,6 @@ teleport:
- tls-ecdhe-rsa-with-chacha20-poly1305
- tls-ecdhe-ecdsa-with-chacha20-poly1305
-
# This section configures the 'auth service':
auth_service:
# Turns 'auth' role on. Default is 'yes'
@@ -366,6 +321,7 @@ auth_service:
app_id: https://localhost:3080
# facets must list all proxy servers if there are more than one deployed
facets:
+
- https://localhost:3080
# IP and the port to bind to. Other Teleport nodes will be connecting to
@@ -385,6 +341,7 @@ auth_service:
# We recommend to use tools like `pwgen` to generate sufficiently random
# tokens of 32+ byte length.
tokens:
+
- "proxy,node:xxxxx"
- "auth:yyyy"
@@ -407,10 +364,10 @@ auth_service:
# certificates expire in the middle of an active SSH session. (default is 'no')
disconnect_expired_cert: no
- # Determines the interval at which Teleport will send keep-alive messages. The
- # default value mirrors sshd at 15 minutes. keep_alive_count_max is the number
- # of missed keep-alive messages before the server tears down the connection to the
- # client.
+ # Determines the interval at which Teleport will send keep-alive messages. The
+ # default value mirrors sshd at 15 minutes. keep_alive_count_max is the number
+ # of missed keep-alive messages before the server tears down the connection to the
+ # client.
keep_alive_interval: 15
keep_alive_count_max: 3
@@ -422,7 +379,7 @@ auth_service:
# and should point to the license file obtained from Teleport Download Portal.
#
# If not set, by default Teleport will look for the `license.pem` file in
- # the configured `data_dir`.
+ # the configured `data_dir` .
license_file: /var/lib/teleport/license.pem
# DEPRECATED in Teleport 3.2 (moved to proxy_service section)
@@ -450,7 +407,9 @@ ssh_service:
# See "Labeling Nodes" section below for more information and more examples.
commands:
# this command will add a label 'arch=x86_64' to a node
+
- name: arch
+
command: ['/bin/uname', '-p']
period: 1h0m0s
@@ -489,8 +448,8 @@ proxy_service:
public_addr: proxy.example.com:3080
# The DNS name of the proxy SSH endpoint as accessible by cluster clients.
- # Defaults to the proxy's hostname if not specified. If running multiple proxies
- # behind a load balancer, this name must point to the load balancer.
+ # Defaults to the proxy's hostname if not specified. If running multiple proxies
+ # behind a load balancer, this name must point to the load balancer.
# Use a TCP load balancer because this port uses SSH protocol.
ssh_public_addr: proxy.example.com:3023
@@ -508,12 +467,12 @@ proxy_service:
listen_addr: 0.0.0.0:3026
# The DNS name of the Kubernetes proxy server that is accessible by cluster clients.
- # If running multiple proxies behind a load balancer, this name must point to the
+ # If running multiple proxies behind a load balancer, this name must point to the
# load balancer.
public_addr: ['kube.example.com:3026']
- # This setting is not required if the Teleport proxy service is
- # deployed inside a Kubernetes cluster. Otherwise, Teleport proxy
+ # This setting is not required if the Teleport proxy service is
+ # deployed inside a Kubernetes cluster. Otherwise, Teleport proxy
# will use the credentials from this file:
kubeconfig_file: /path/to/kube/config
```
@@ -521,40 +480,50 @@ proxy_service:
#### Public Addr
Notice that all three Teleport services (proxy, auth, node) have an optional
-`public_addr` property. The public address can take an IP or a DNS name.
-It can also be a list of values:
+`public_addr` property. The public address can take an IP or a DNS name. It can
+also be a list of values:
-```yaml
+``` yaml
public_addr: ["proxy-one.example.com", "proxy-two.example.com"]
```
-Specifying a public address for a Teleport service may be useful in the following use cases:
+Specifying a public address for a Teleport service may be useful in the
+following use cases:
* You have multiple identical services, like proxies, behind a load balancer.
-* You want Teleport to issue SSH certificate for the service with the
- additional principals, e.g. host names.
+* You want Teleport to issue SSH certificate for the service with the additional
+  principals, e.g. host names.
## Authentication
-Teleport uses the concept of "authentication connectors" to authenticate users when
-they execute `tsh login` command. There are three types of authentication connectors:
+Teleport uses the concept of "authentication connectors" to authenticate users
+when they execute [ `tsh login` ](../cli-docs/#tsh-login) command. There are three
+types of authentication connectors:
### Local Connector
-Local authentication is used to authenticate against a local Teleport user database. This database
-is managed by `tctl users` command. Teleport also supports second factor authentication
-(2FA) for the local connector. There are three possible values (types) of 2FA:
+Local authentication is used to authenticate against a local Teleport user
+database. This database is managed by [ `tctl users` ](./cli-docs/#tctl-users)
+command. Teleport also supports second factor authentication (2FA) for the local
+connector. There are three possible values (types) of 2FA:
- * `otp` is the default. It implements [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
- standard. You can use [Google Authenticator](https://en.wikipedia.org/wiki/Google_Authenticator) or
- [Authy](https://www.authy.com/) or any other TOTP client.
- * `u2f` implements [U2F](https://en.wikipedia.org/wiki/Universal_2nd_Factor) standard for utilizing hardware (USB)
- keys for second factor.
- * `off` turns off second factor authentication.
+ + `otp` is the default. It implements
-Here is an example of this setting in the `teleport.yaml`:
+ [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
+ standard. You can use [Google
+ Authenticator](https://en.wikipedia.org/wiki/Google_Authenticator) or
+ [Authy](https://www.authy.com/) or any other TOTP client.
-```yaml
+ + `u2f` implements [U2F](https://en.wikipedia.org/wiki/Universal_2nd_Factor)
+   standard for utilizing hardware (USB) keys for second factor.
+
+ + `off` turns off second factor authentication.
+
+Here is an example of this setting in the `teleport.yaml` :
+
+``` yaml
auth_service:
authentication:
type: local
@@ -563,13 +532,14 @@ auth_service:
### Github OAuth 2.0 Connector
-This connector implements Github OAuth 2.0 authentication flow. Please refer
-to Github documentation on [Creating an OAuth App](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/)
+This connector implements Github OAuth 2.0 authentication flow. Please refer to
+Github documentation on [Creating an OAuth
+App](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/)
to learn how to create and register an OAuth app.
-Here is an example of this setting in the `teleport.yaml`:
+Here is an example of this setting in the `teleport.yaml` :
-```yaml
+``` yaml
auth_service:
authentication:
type: github
@@ -579,13 +549,13 @@ See [Github OAuth 2.0](#github-oauth-20) for details on how to configure it.
### SAML
-This connector type implements SAML authentication. It can be configured
-against any external identity manager like Okta or Auth0. This feature is
-only available for Teleport Enterprise.
+This connector type implements SAML authentication. It can be configured against
+any external identity manager like Okta or Auth0. This feature is only available
+for Teleport Enterprise.
-Here is an example of this setting in the `teleport.yaml`:
+Here is an example of this setting in the `teleport.yaml` :
-```yaml
+``` yaml
auth_service:
authentication:
type: saml
@@ -596,25 +566,30 @@ auth_service:
Teleport implements OpenID Connect (OIDC) authentication, which is similar to
SAML in principle. This feature is only available for Teleport Enterprise.
-Here is an example of this setting in the `teleport.yaml`:
+Here is an example of this setting in the `teleport.yaml` :
-```yaml
+``` yaml
auth_service:
authentication:
type: oidc
```
-
### FIDO U2F
Teleport supports [FIDO U2F](https://www.yubico.com/about/background/fido/)
-hardware keys as a second authentication factor. By default U2F is disabled. To start using U2F:
+hardware keys as a second authentication factor. By default U2F is disabled. To
+start using U2F:
-* Enable U2F in Teleport configuration `/etc/teleport.yaml`.
-* For CLI-based logins you have to install [u2f-host](https://developers.yubico.com/libu2f-host/) utility.
-* For web-based logins you have to use Google Chrome, as it is the only browser supporting U2F at this time.
+* Enable U2F in Teleport configuration `/etc/teleport.yaml` .
+* For CLI-based logins you have to install
-```yaml
+ [u2f-host](https://developers.yubico.com/libu2f-host/) utility.
+
+* For web-based logins you have to use Google Chrome, as it is the only browser
+  supporting U2F at this time.
+
+``` yaml
# snippet from /etc/teleport.yaml to show an example configuration of U2F:
auth_service:
authentication:
@@ -630,25 +605,25 @@ auth_service:
- https://localhost:3080
```
-For single-proxy setups, the `app_id` setting can be equal to the domain name of the
-proxy, but this will prevent you from adding more proxies without changing the
-`app_id`. For multi-proxy setups, the `app_id` should be an HTTPS URL pointing to
-a JSON file that mirrors `facets` in the auth config.
+For single-proxy setups, the `app_id` setting can be equal to the domain name of
+the proxy, but this will prevent you from adding more proxies without changing
+the `app_id` . For multi-proxy setups, the `app_id` should be an HTTPS URL
+pointing to a JSON file that mirrors `facets` in the auth config.
!!! warning "Warning":
- The `app_id` must never change in the lifetime of the cluster. If the App ID
- changes, all existing U2F key registrations will become invalid and all users
- who use U2F as the second factor will need to re-register.
- When adding a new proxy server, make sure to add it to the list of "facets"
- in the configuration file, but also to the JSON file referenced by `app_id`
+ The `app_id` must never change in the lifetime of the
+ cluster. If the App ID changes, all existing U2F key registrations will
+ become invalid and all users who use U2F as the second factor will need to
+ re-register. When adding a new proxy server, make sure to add it to the list
+ of "facets" in the configuration file, but also to the JSON file referenced
+ by `app_id` .
-
**Logging in with U2F**
-For logging in via the CLI, you must first install [u2f-host](https://developers.yubico.com/libu2f-host/).
-Installing:
+For logging in via the CLI, you must first install
+[u2f-host](https://developers.yubico.com/libu2f-host/). Installing:
-```yaml
+``` yaml
# OSX:
$ brew install libu2f-host
@@ -662,53 +637,59 @@ Then invoke `tsh ssh` as usual to authenticate:
tsh --proxy ssh
```
-!!! tip "Version Warning":
- External user identities are only supported in [Teleport Enterprise](/enterprise/). Please reach
- out to `sales@gravitational.com` for more information.
+!!! tip "Version Warning"
+    External user identities are only supported in
+    [Teleport Enterprise](/enterprise/). Please reach out to
+    `sales@gravitational.com` for more information.
## Adding and Deleting Users
-This section covers internal user identities, i.e. user accounts created and
+This section covers internal user identities, i.e. user accounts created and
stored in Teleport's internal storage. Most production users of Teleport use
-_external_ users via [Github](#github-oauth-20) or [Okta](ssh_okta) or any
-other SSO provider (Teleport Enterprise supports any SAML or OIDC compliant
-identity provider).
+_external_ users via [Github](#github-oauth-20) or [Okta](ssh_okta) or any other
+SSO provider (Teleport Enterprise supports any SAML or OIDC compliant identity
+provider).
A user identity in Teleport exists in the scope of a cluster. The member nodes
-of a cluster have multiple OS users on them. A Teleport administrator creates Teleport user accounts and maps them to the allowed OS user logins they can use.
+of a cluster have multiple OS users on them. A Teleport administrator creates
+Teleport user accounts and maps them to the allowed OS user logins they can use.
Let's look at this table:
|Teleport User | Allowed OS Logins | Description
|------------------|---------------|-----------------------------
-|joe | joe,root | Teleport user 'joe' can login into member nodes as OS user 'joe' or 'root'
+|joe | joe, root | Teleport user 'joe' can login into member nodes as OS user 'joe' or 'root'
|bob | bob | Teleport user 'bob' can login into member nodes only as OS user 'bob'
|ross | | If no OS login is specified, it defaults to the same name as the Teleport user.
-To add a new user to Teleport, you have to use the `tctl` tool on the same node where
-the auth server is running, i.e. `teleport` was started with `--roles=auth`.
+To add a new user to Teleport, you have to use the [ `tctl` ](../cli-docs/#tctl)
+tool on the same node where the auth server is running, i.e.
+[ `teleport` ](../cli-docs/#teleport) was started with `--roles=auth` .
-```bsh
+``` bsh
$ tctl users add joe joe,root
```
-Teleport generates an auto-expiring token (with a TTL of 1 hour) and prints the token
-URL which must be used before the TTL expires.
+Teleport generates an auto-expiring token (with a TTL of 1 hour) and prints the
+token URL which must be used before the TTL expires.
-```bsh
+``` bsh
Signup token has been created. Share this URL with the user:
https://:3080/web/newuser/xxxxxxxxxxxx
NOTE: make sure the host is accessible.
```
-The user completes registration by visiting this URL in their web browser, picking a password and
-configuring the 2nd factor authentication. If the credentials are correct, the auth
-server generates and signs a new certificate and the client stores this key and will use
-it for subsequent logins. The key will automatically expire after 12 hours by default after which
-the user will need to log back in with her credentials. This TTL can be configured to a different value. Once authenticated, the account will become visible via `tctl`:
+The user completes registration by visiting this URL in their web browser,
+picking a password and configuring the 2nd factor authentication. If the
+credentials are correct, the auth server generates and signs a new certificate
+and the client stores this key and will use it for subsequent logins. The key
+will automatically expire after 12 hours by default after which the user will
+need to log back in with her credentials. This TTL can be configured to a
+different value. Once authenticated, the account will become visible via
+`tctl` :
-```bsh
+``` bsh
$ tctl users ls
User Allowed Logins
@@ -721,29 +702,29 @@ joe joe,root
Joe would then use the `tsh` client tool to log in to member node "luna" via
bastion "work" _as root_:
-```yaml
+``` yaml
$ tsh --proxy=work --user=joe root@luna
```
To delete this user:
-```yaml
+``` yaml
$ tctl users rm joe
```
## Editing Users
-Users entries can be manipulated using the generic [resource commands](#resources)
-via `tctl`. For example, to see the full list of user records, an administrator
-can execute:
+Users entries can be manipulated using the generic [resource
+commands](#resources) via [ `tctl` ](../cli-docs/#tctl) . For example, to see the
+full list of user records, an administrator can execute:
-```yaml
+``` yaml
$ tctl get users
```
To edit the user "joe":
-```yaml
+``` yaml
# dump the user definition into a file:
$ tctl get user/joe > joe.yaml
# ... edit the contents of joe.yaml
@@ -752,19 +733,20 @@ $ tctl get user/joe > joe.yaml
$ tctl create -f joe.yaml
```
-Some fields in the user record are reserved for internal use. Some of them
-will be finalized and documented in the future versions. Fields like
-`is_locked` or `traits/logins` can be used starting in version 2.3
+Some fields in the user record are reserved for internal use. Some of them will
+be finalized and documented in the future versions. Fields like `is_locked` or
+`traits/logins` can be used starting in version 2.3
## Adding Nodes to the Cluster
-Teleport is a "clustered" system, meaning it only allows
-access to nodes (servers) that had been previously granted cluster membership.
+Teleport is a "clustered" system, meaning it only allows access to nodes
+(servers) that had been previously granted cluster membership.
A cluster membership means that a node receives its own host certificate signed
-by the cluster's auth server. To receive a host certificate upon joining a cluster,
-a new Teleport host must present an "invite token". An invite token also defines
-which role a new host can assume within a cluster: `auth`, `proxy` or `node`.
+by the cluster's auth server. To receive a host certificate upon joining a
+cluster, a new Teleport host must present an "invite token". An invite token
+also defines which role a new host can assume within a cluster: `auth` , `proxy`
+or `node` .
There are two ways to create invitation tokens:
@@ -773,19 +755,23 @@ There are two ways to create invitation tokens:
### Static Tokens
-Static tokens are defined ahead of time by an administrator and stored
-in the auth server's config file:
+Static tokens are defined ahead of time by an administrator and stored in the
+auth server's config file:
-```yaml
+``` yaml
# Config section in `/etc/teleport.yaml` file for the auth server
auth_service:
enabled: true
tokens:
# This static token allows new hosts to join the cluster as "proxy" or "node"
+
- "proxy,node:secret-token-value"
+
# A token can also be stored in a file. In this example the token for adding
# new auth servers is stored in /path/to/tokenfile
+
- "auth:/path/to/tokenfile"
+
```
### Short-lived Tokens
@@ -794,17 +780,18 @@ A more secure way to add nodes to a cluster is to generate tokens as they are
needed. Such token can be used multiple times until its time to live (TTL)
expires.
-Use the `tctl` tool to register a new invitation token (or it can also generate a new token
-for you). In the following example a new token is created with a TTL of 5 minutes:
+Use the [ `tctl` ](../cli-docs/#tctl) tool to register a new invitation token (or
+it can also generate a new token for you). In the following example a new token
+is created with a TTL of 5 minutes:
-```bsh
+``` bsh
$ tctl nodes add --ttl=5m --roles=node,proxy --token=secret-value
The invite token: secret-value
```
-If `--token` is not provided, `tctl` will generate one:
+If `--token` is not provided, [ `tctl` ](../cli-docs/#tctl) will generate one:
-```bsh
+``` bsh
# generate a short-lived invitation token for a new node:
$ tctl nodes add --ttl=5m --roles=node,proxy
The invite token: e94d68a8a1e5821dbd79d03a960644f0
@@ -821,10 +808,10 @@ $ tctl tokens rm e94d68a8a1e5821dbd79d03a960644f0
### Using Node Invitation Tokens
-Both static and short-lived tokens are used the same way. Execute the
-following command on a new node to add it to a cluster:
+Both static and short-lived tokens are used the same way. Execute the following
+command on a new node to add it to a cluster:
-```bsh
+``` bsh
# adding a new regular SSH node to the cluster:
$ teleport start --roles=node --token=secret-token-value --auth-server=10.0.10.5
@@ -835,11 +822,10 @@ $ teleport start --roles=node --token=secret-token-value --auth-server=teleport-
$ teleport start --roles=proxy --token=secret-token-value --auth-server=10.0.10.5
```
-As new nodes come online, they start sending ping requests every few seconds
-to the CA of the cluster. This allows users to explore cluster membership
-and size:
+As new nodes come online, they start sending ping requests every few seconds to
+the CA of the cluster. This allows users to explore cluster membership and size:
-```bsh
+``` bsh
$ tctl nodes ls
Node Name Node ID Address Labels
@@ -851,31 +837,31 @@ dijkstra c9s93fd9-3333-91d3-9999-c9s93fd98f43 10.1.0.6:3022 distro
### Untrusted Auth Servers
Teleport nodes use the HTTPS protocol to offer the join tokens to the auth
-server running on `10.0.10.5` in the example above. In a zero-trust
-environment, you must assume that an attacker can highjack the IP address of
-the auth server e.g. `10.0.10.5`.
+server running on `10.0.10.5` in the example above. In a zero-trust environment,
+you must assume that an attacker can highjack the IP address of the auth server
+e.g. `10.0.10.5` .
To prevent this from happening, you need to supply every new node with an
additional bit of information about the auth server. This technique is called
-"CA Pinning". It works by asking the auth server to produce a "CA Pin", which
-is a hashed value of it's private key, i.e. it cannot be forged by an attacker.
+"CA Pinning". It works by asking the auth server to produce a "CA Pin", which is
+a hashed value of its private key, i.e. it cannot be forged by an attacker.
On the auth server:
-```bash
+``` bash
$ tctl status
-Cluster staging.example.com
-User CA never updated
-Host CA never updated
+Cluster staging.example.com
+User CA never updated
+Host CA never updated
CA pin sha256:7e12c17c20d9cb504bbcb3f0236be3f446861f1396dcbb44425fe28ec1c108f1
```
-The "CA pin" at the bottom needs to be passed to the new nodes when they're starting
-for the first time, i.e. when they join a cluster:
+The "CA pin" at the bottom needs to be passed to the new nodes when they're
+starting for the first time, i.e. when they join a cluster:
Via CLI:
-```bash
+``` bash
$ teleport start \
--roles=node \
--token=1ac590d36493acdaa2387bc1c492db1a \
@@ -885,31 +871,35 @@ $ teleport start \
or via `/etc/teleport.yaml` on a node:
-```yaml
+``` yaml
teleport:
auth_token: "1ac590d36493acdaa2387bc1c492db1a"
ca_pin: "sha256:7e12c17c20d9cb504bbcb3f0236be3f446861f1396dcbb44425fe28ec1c108f1"
auth_servers:
+
- "10.12.0.6:3025"
+
```
!!! warning "Warning":
- If a CA pin not provided, Teleport node will join a cluster but it will print
- a `WARN` message (warning) into it's standard error output.
+    If a CA pin is not provided, the Teleport node will join a
+    cluster but it will print a `WARN` message (warning) into its standard
+    error output.
!!! warning "Warning":
- The CA pin becomes invalid if a Teleport administrator performs the CA
- rotation by executing `tctl auth rotate`.
+ The CA pin becomes invalid if a Teleport administrator
+ performs the CA rotation by executing
+ [ `tctl auth rotate` ](../cli-docs/#tctl-auth-rotate) .
## Revoking Invitations
-As you have seen above, Teleport uses tokens to invite users to a cluster (sign-up tokens) or
-to add new nodes to it (provisioning tokens).
+As you have seen above, Teleport uses tokens to invite users to a cluster
+(sign-up tokens) or to add new nodes to it (provisioning tokens).
-Both types of tokens can be revoked before they can be used. To see a list of outstanding tokens,
-run this command:
+Both types of tokens can be revoked before they can be used. To see a list of
+outstanding tokens, run this command:
-```bsh
+``` bsh
$ tctl tokens ls
Token Role Expiry Time (UTC)
@@ -919,14 +909,16 @@ eoKoh0caiw6weoGupahgh6Wuo7jaTee2 Proxy never
6fc5545ab78c2ea978caabef9dbd08a5 Signup 17 May 16 04:24 UTC
```
-In this example, the first token has a "never" expiry date because it is a static token configured via a config file.
+In this example, the first token has a "never" expiry date because it is a
+static token configured via a config file.
-The 2nd token with "Node" role was generated to invite a new node to this cluster. And the
-3rd token was generated to invite a new user.
+The 2nd token with "Node" role was generated to invite a new node to this
+cluster. And the 3rd token was generated to invite a new user.
-The latter two tokens can be deleted (revoked) via `tctl tokens del` command:
+The latter two tokens can be deleted (revoked) via [`tctl tokens
+del`](../cli-docs/#tctl-tokens-rm) command:
-```yaml
+``` yaml
$ tctl tokens del 696c0471453e75882ff70a761c1a8bfa
Token 696c0471453e75882ff70a761c1a8bfa has been deleted
```
@@ -937,31 +929,35 @@ In addition to specifying a custom nodename, Teleport also allows for the
application of arbitrary key:value pairs to each node, called labels. There are
two kinds of labels:
-1. `static labels` do not change over time, while `teleport` process is
- running. Examples of static labels are physical location of nodes, name of
- the environment (staging vs production), etc.
+1. `static labels` do not change over time, while
+
+ [ `teleport` ](../cli-docs/#teleport) process is running.
+
+ Examples of static labels are physical location of nodes, name of the
+ environment (staging vs production), etc.
-2. `dynamic labels` also known as "label commands" allow to generate labels at runtime.
- Teleport will execute an external command on a node at a configurable frequency and
- the output of a command becomes the label value. Examples include reporting load
- averages, presence of a process, time after last reboot, etc.
+2. `dynamic labels` also known as "label commands" allow to generate labels at
-There are two ways to configure node labels.
+ runtime. Teleport will execute an external command on a node at a
+ configurable frequency and the output of a command becomes the label value.
+ Examples include reporting load averages, presence of a process, time after
+ last reboot, etc.
+
+There are two ways to configure node labels.
1. Via command line, by using `--labels` flag to `teleport start` command.
2. Using `/etc/teleport.yaml` configuration file on the nodes.
+To define labels as command line arguments, use `--labels` flag like shown
+below. This method works well for static labels or simple commands:
-To define labels as command line arguments, use `--labels` flag like shown below.
-This method works well for static labels or simple commands:
-
-```yaml
+``` yaml
$ teleport start --labels uptime=[1m:"uptime -p"],kernel=[1h:"uname -r"]
```
Alternatively, you can update `labels` via a configuration file:
-```yaml
+``` yaml
ssh_service:
enabled: "yes"
# Static labels are simple key/value pairs:
@@ -972,25 +968,28 @@ ssh_service:
To configure dynamic labels via a configuration file, define a `commands` array
as shown below:
-```yaml
+``` yaml
ssh_service:
enabled: "yes"
# Dynamic labels AKA "commands":
commands:
- - name: arch
+
+ - name: arch
+
command: ['/path/to/executable', 'flag1', 'flag2']
# this setting tells teleport to execute the command above
# once an hour. this value cannot be less than one minute.
- period: 1h0m0s
+ period: 1h0m0s
```
-`/path/to/executable` must be a valid executable command (i.e. executable bit must be set)
-which also includes shell scripts with a proper [shebang line](https://en.wikipedia.org/wiki/Shebang_(Unix)).
+`/path/to/executable` must be a valid executable command (i.e. executable bit
+must be set) which also includes shell scripts with a proper [shebang
+line](https://en.wikipedia.org/wiki/Shebang_(Unix)).
-**Important:** notice that `command` setting is an array where the first element is
-a valid executable and each subsequent element is an argument, i.e:
+**Important:** notice that `command` setting is an array where the first element
+is a valid executable and each subsequent element is an argument, i.e:
-```yaml
+``` yaml
# valid syntax:
command: ["/bin/uname", "-m"]
@@ -1002,36 +1001,42 @@ command: ["/bin/uname -m"]
command: ["/bin/sh", "-c", "uname -a | egrep -o '[0-9]+\.[0-9]+\.[0-9]+'"]
```
-
## Audit Log
-Teleport logs every SSH event into its audit log. There are two components of the audit log:
+Teleport logs every SSH event into its audit log. There are two components of
+the audit log:
1. **SSH Events:** Teleport logs events like successful user logins along with
+
the metadata like remote IP address, time and the session ID.
-2. **Recorded Sessions:** Every SSH shell session is recorded and can be replayed
- later. The recording is done by the nodes themselves, by default, but can be configured
- to be done by the proxy.
-Refer to the ["Audit Log" chapter in the Teleport Architecture](architecture#audit-log)
-to learn more about how the audit Log and session recording are designed.
+2. **Recorded Sessions:** Every SSH shell session is recorded and can be
+
+ replayed later. The recording is done by the nodes themselves, by default,
+ but can be configured to be done by the proxy.
+
+Refer to the ["Audit Log" chapter in the Teleport
+Architecture](architecture#audit-log) to learn more about how the audit log and
+session recording are designed.
### SSH Events
-Teleport supports multiple storage back-ends for storing the SSH events. The section below
-uses the `dir` backend as an example. `dir` backend uses the local filesystem of an
-auth server using the configurable `data_dir` directory.
+Teleport supports multiple storage back-ends for storing the SSH events. The
+section below uses the `dir` backend as an example. `dir` backend uses the local
+filesystem of an auth server using the configurable `data_dir` directory.
-For highly available (HA) configuration users can refer to [DynamoDB](#using-dynamodb) or [etcd](#using-etcd)
-chapters on how to configure the SSH events and recorded sessions to be stored
-on network storage. It is even possible to store the audit log in multiple places at the same time,
-see `audit_events_uri` setting in the sample configuration file above for how to do that.
+For highly available (HA) configuration users can refer to
+[DynamoDB](#using-dynamodb) or [etcd](#using-etcd) chapters on how to configure
+the SSH events and recorded sessions to be stored on network storage. It is even
+possible to store the audit log in multiple places at the same time, see
+`audit_events_uri` setting in the sample configuration file above for how to do
+that.
Let's examine the Teleport audit log using the `dir` backend. The event log is
-stored in `data_dir` under `log` directory, usually `/var/lib/teleport/log`.
+stored in `data_dir` under `log` directory, usually `/var/lib/teleport/log` .
Each day is represented as a file:
-```yaml
+``` yaml
$ ls -l /var/lib/teleport/log/
total 104
-rw-r----- 1 root root 31638 Jan 22 20:00 2017-01-23.00:00:00.log
@@ -1039,66 +1044,71 @@ total 104
-rw-r----- 1 root root 15815 Feb 32 22:54 2017-02-03.00:00:00.log
```
-The log files use JSON format. They are human-readable but can also be programmatically parsed.
-Each line represents an event and has the following format:
+The log files use JSON format. They are human-readable but can also be
+programmatically parsed. Each line represents an event and has the following
+format:
-```js
+``` js
{
- // Event type. See below for the list of all possible event types
- "event" : "session.start",
- // Teleport user name
- "user" : "ekontsevoy",
- // OS login
- "login" : "root",
- // Server namespace. This field is reserved for future use.
- "namespace" : "default",
- // Unique server ID.
- "server_id" : "f84f7386-5e22-45ff-8f7d-b8079742e63f",
- // Session ID. Can be used to replay the session.
- "sid" : "8d3895b6-e9dd-11e6-94de-40167e68e931",
- // Address of the SSH node
- "addr.local" : "10.5.l.15:3022",
- // Address of the connecting client (user)
- "addr.remote": "73.223.221.14:42146",
- // Terminal size
- "size" : "80:25",
- // Timestamp
- "time" : "2017-02-03T06:54:05Z"
+ // Event type. See below for the list of all possible event types
+ "event": "session.start",
+ // Teleport user name
+ "user": "ekontsevoy",
+ // OS login
+ "login": "root",
+ // Server namespace. This field is reserved for future use.
+ "namespace": "default",
+ // Unique server ID.
+ "server_id": "f84f7386-5e22-45ff-8f7d-b8079742e63f",
+ // Session ID. Can be used to replay the session.
+ "sid": "8d3895b6-e9dd-11e6-94de-40167e68e931",
+ // Address of the SSH node
+ "addr.local": "10.5.1.15:3022",
+ // Address of the connecting client (user)
+ "addr.remote": "73.223.221.14:42146",
+ // Terminal size
+ "size": "80:25",
+ // Timestamp
+ "time": "2017-02-03T06:54:05Z"
}
```
The possible event types are:
-Event Type | Description
-----------------|----------------
-auth | Authentication attempt. Adds the following fields: `{"success": "false", "error": "access denied"}`
-session.start | Started an interactive shell session.
-session.end | An interactive shell session has ended.
-session.join | A new user has joined the existing interactive shell session.
-session.leave | A user has left the session.
-exec | Remote command has been executed via SSH, like `tsh ssh root@node ls /`. The following fields will be logged: `{"command": "ls /", "exitCode": 0, "exitError": ""}`
-scp | Remote file copy has been executed. The following fields will be logged: `{"path": "/path/to/file.txt", "len": 32344, "action": "read" }`
-resize | Terminal has been resized.
-user.login | A user logged into web UI or via tsh. The following fields will be logged: `{"user": "alice@example.com", "method": "local"}`.
+| Event Type | Description |
+|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| auth | Authentication attempt. Adds the following fields: `{"success": "false", "error": "access denied"}` |
+| session.start | Started an interactive shell session.|
+| session.end | An interactive shell session has ended.|
+| session.join | A new user has joined the existing interactive shell session.|
+| session.leave | A user has left the session.|
+| exec | Remote command has been executed via SSH, like `tsh ssh root@node ls /` . The following fields will be logged: `{"command": "ls /", "exitCode": 0, "exitError": ""}` |
+| scp | Remote file copy has been executed. The following fields will be logged: `{"path": "/path/to/file.txt", "len": 32344, "action": "read" }` |
+| resize | Terminal has been resized.|
+| user.login | A user logged into web UI or via tsh. The following fields will be logged: `{"user": "alice@example.com", "method": "local"}` .|
### Recorded Sessions
-In addition to logging `session.start` and `session.end` events, Teleport also records the entire
-stream of bytes going to/from standard input and standard output of an SSH session.
+In addition to logging `session.start` and `session.end` events, Teleport also
+records the entire stream of bytes going to/from standard input and standard
+output of an SSH session.
-Teleport can store the recorded sessions in an [AWS S3 bucket](#using-dynamodb) or in a local
-filesystem (including NFS).
+Teleport can store the recorded sessions in an [AWS S3 bucket](#using-dynamodb)
+or in a local filesystem (including NFS).
-The recorded sessions are stored as raw bytes in the `sessions` directory under `log`.
-Each session consists of two files, both are named after the session ID:
+The recorded sessions are stored as raw bytes in the `sessions` directory under
+`log` . Each session consists of two files, both are named after the session ID:
1. `.bytes` file represents the raw session bytes and is somewhat
- human-readable, although you are better off using `tsh play` or
- the Web UI to replay it.
-2. `.log` file contains the copies of the event log entries that are related
- to this session.
-```yaml
+ human-readable, although you are better off using [`tsh
+ play`](../cli-docs/#tsh-play) or the Web UI to replay it.
+
+2. `.log` file contains the copies of the event log entries that are related to
+
+ this session.
+
+``` yaml
$ ls /var/lib/teleport/log/sessions/default
-rw-r----- 1 root root 506192 Feb 4 00:46 4c146ec8-eab6-11e6-b1b3-40167e68e931.session.bytes
-rw-r----- 1 root root 44943 Feb 4 00:46 4c146ec8-eab6-11e6-b1b3-40167e68e931.session.log
@@ -1106,63 +1116,67 @@ $ ls /var/lib/teleport/log/sessions/default
To replay this session via CLI:
-```yaml
+``` yaml
$ tsh --proxy=proxy play 4c146ec8-eab6-11e6-b1b3-40167e68e931
```
### Recording Proxy Mode
-See [Audit Log Architecture](architecture/#audit-log) to understand how the session
-recording works. By default, the recording is not
-available if a cluster runs `sshd` (the OpenSSH daemon) on the nodes.
+See [Audit Log Architecture](architecture/#audit-log) to understand how the
+session recording works. By default, the recording is not available if a cluster
+runs `sshd` (the OpenSSH daemon) on the nodes.
To enable session recording for `sshd` nodes, the cluster must be switched to
-"recording proxy" mode. In this mode, the recording will be done on the proxy level:
+"recording proxy" mode. In this mode, the recording will be done on the proxy
+level:
-```yaml
+``` yaml
# snippet from /etc/teleport.yaml
auth_service:
session_recording: "proxy" # can also be "off" and "node" (default)
```
-Next, `sshd` must be told to allow users to log in with certificates generated by the
-Teleport User CA. Start by exporting the Teleport CA public key:
+Next, `sshd` must be told to allow users to log in with certificates generated
+by the Teleport User CA. Start by exporting the Teleport CA public key:
-```yaml
+``` yaml
$ tctl auth export --type=user
```
-To allow access to a single user, copy the above output to `~/.ssh/authorized_keys`. To apply this for all users, remove "cert-authority" from the start of `tctl` output and
-copy it to `/etc/ssh/teleport_user_ca.pub`.
+To allow access to a single user, copy the above output to
+`~/.ssh/authorized_keys` . To apply this for all users, remove "cert-authority"
+from the start of [ `tctl` ](../cli-docs/#tctl) output and copy it to
+`/etc/ssh/teleport_user_ca.pub` .
-Add the following line to `/etc/ssh/sshd_config`:
+Add the following line to `/etc/ssh/sshd_config` :
-```yaml
+``` yaml
TrustedUserCAKeys /etc/ssh/teleport_user_ca.pub
```
-Now `sshd` will trust users who present a Teleport-issued certificate. The next step
-is to configure host authentication.
+Now `sshd` will trust users who present a Teleport-issued certificate. The next
+step is to configure host authentication.
When in recording mode, Teleport will check that the host certificate of the
node a user connects to is signed by a Teleport CA. By default this is a strict
check. If the node presents just a key, or a certificate signed by a different
-CA, Teleport will reject this connection with the error message saying
-_"ssh: handshake failed: remote host presented a public key, expected a host certificate"_
+CA, Teleport will reject this connection with the error message saying _"ssh:
+handshake failed: remote host presented a public key, expected a host
+certificate"_
-You can disable strict host checks as shown below. However, this opens the possibility for
-Man-in-the-Middle (MITM) attacks and is not recommended.
+You can disable strict host checks as shown below. However, this opens the
+possibility for Man-in-the-Middle (MITM) attacks and is not recommended.
-```yaml
+``` yaml
# snippet from /etc/teleport.yaml
auth_service:
proxy_checks_host_keys: no
```
-The recommended solution is to ask Teleport to issue valid host certificates for all OpenSSH
-nodes. To generate a host certificate run this on your auth server:
+The recommended solution is to ask Teleport to issue valid host certificates for
+all OpenSSH nodes. To generate a host certificate run this on your auth server:
-```yaml
+``` yaml
$ tctl auth sign \
--host=node.example.com \
--format=openssh
@@ -1170,18 +1184,19 @@ $ tctl auth sign \
Then add the following lines to `/etc/ssh/sshd_config` and restart sshd.
-```yaml
+``` yaml
HostKey /etc/ssh/teleport_host_key
HostCertificate /etc/ssh/teleport_host_key-cert.pub
```
-Now you can use `tsh ssh user@host.example.com` to login into any `sshd` node in the cluster
-and the session will be recorded. If you want to use OpenSSH `ssh` client for logging
-into `sshd` servers behind a proxy in "recording mode", you have to tell the `ssh` client
-to use the jump host and enable the agent forwarding, otherwise a recording proxy will not
-be able to terminate the SSH connection to record it:
+Now you can use [ `tsh ssh user@host.example.com` ](../cli-docs/#tsh) to log
+into any `sshd` node in the cluster and the session will be recorded. If you
+want to use OpenSSH `ssh` client for logging into `sshd` servers behind a proxy
+in "recording mode", you have to tell the `ssh` client to use the jump host and
+enable the agent forwarding, otherwise a recording proxy will not be able to
+terminate the SSH connection to record it:
-```bsh
+``` bsh
# Note that agent forwarding is enabled twice: one from a client to a proxy
# (mandatory if using a recording proxy), and then optionally from a proxy
# to the end server if you want your agent running on the end server or not
@@ -1191,15 +1206,17 @@ ssh -o "ForwardAgent yes" \
```
!!! tip "Tip":
- To avoid typing all this and use the usual `ssh user@host.example.com`, users can update their
- `~/.ssh/config` file. See "Using Teleport with OpenSSH" chapter for more examples.
+ To avoid typing all this and use the usual
+ `ssh user@host.example.com`, users can update their `~/.ssh/config` file. See
+ "Using Teleport with OpenSSH" chapter for more examples.
**IMPORTANT**
-It's important to remember that SSH agent forwarding must be enabled on the client.
-Verify that a Teleport certificate is loaded into the agent after logging in:
+It's important to remember that SSH agent forwarding must be enabled on the
+client. Verify that a Teleport certificate is loaded into the agent after
+logging in:
-```bsh
+``` bsh
# Login as Joe
$ tsh login --proxy=proxy.example.com joe
# see if the certificate is present (look for "teleport:joe") at the end of the cert
@@ -1207,23 +1224,26 @@ $ ssh-add -L
```
!!! warning "GNOME Keyring SSH Agent":
- It is well-known that Gnome Keyring SSH agent, used by many popular Linux
- desktops like Ubuntu, does not support SSH certificates. We recommend using
- the `ssh-agent` command from `openssh-client` package.
+ It is well-known that Gnome Keyring SSH
+ agent, used by many popular Linux desktops like Ubuntu, does not support SSH
+ certificates. We recommend using the `ssh-agent` command from the
+ `openssh-client` package.
### OpenSSH Rate Limiting
-When using a Teleport proxy in "recording mode", be aware of
-OpenSSH built-in rate limiting. On large number of proxy connections you may encounter errors like:
+When using a Teleport proxy in "recording mode", be aware of OpenSSH built-in
+rate limiting. On large number of proxy connections you may encounter errors
+like:
-```bsh
+``` bsh
channel 0: open failed: connect failed: ssh: handshake failed: EOF
```
-See `MaxStartups` setting in `man sshd_config`. This setting means that by
+See `MaxStartups` setting in `man sshd_config` . This setting means that by
default OpenSSH only allows 10 unauthenticated connections at a time and starts
-dropping connections 30% of the time when the number of connections goes over
-10 and when it hits 100 authentication connections, all new connections are
+dropping connections 30% of the time when the number of connections goes over 10
+and when it hits 100 authentication connections, all new connections are
dropped.
To increase the concurrency level, increase the value to something like
@@ -1233,51 +1253,62 @@ MaxStartups 50:30:100. This allows 50 concurrent connections and a max of 100.
A Teleport administrator has two tools to configure a Teleport cluster:
-- The [configuration file](#configuration) is used for static configuration like the cluster name.
-- The `tctl` admin tool is used for manipulating dynamic records
-like Teleport users.
+* The [configuration file](#configuration) is used for static configuration like
+
+ the cluster name.
+
+* The [ `tctl` ](../cli-docs/#tctl) admin tool is used for manipulating dynamic
+
+ records like Teleport users.
-`tctl` has convenient subcommands for dynamic configuration, like `tctl users` or `tctl nodes`.
-However, for dealing with more advanced topics, like connecting clusters together or
-troubleshooting trust, `tctl` offers the more powerful, although lower-level
-CLI interface called `resources`.
+[ `tctl` ](../cli-docs/#tctl) has convenient subcommands for dynamic
+configuration, like `tctl users` or `tctl nodes` . However, for dealing with
+more advanced topics, like connecting clusters together or troubleshooting
+trust, [ `tctl` ](../cli-docs/#tctl) offers the more powerful, although
+lower-level CLI interface called `resources` .
The concept is borrowed from the REST programming pattern. A cluster is composed
of different objects (aka, resources) and there are just four common operations
-that can be performed on them: `get`, `create`, `remove`.
+that can be performed on them: `get` , `create` , `remove` .
-A resource is defined as a [YAML](https://en.wikipedia.org/wiki/YAML) file. Every resource in Teleport has three required fields:
+A resource is defined as a [YAML](https://en.wikipedia.org/wiki/YAML) file.
+Every resource in Teleport has three required fields:
* `Kind` - The type of resource
* `Name` - A required field in the `metadata` to uniquely identify the resource
* `Version` - The version of the resource format
-Everything else is resource-specific and any component of a Teleport cluster can be
-manipulated with just 3 CLI commands:
+Everything else is resource-specific and any component of a Teleport cluster can
+be manipulated with just 3 CLI commands:
-Command | Description | Examples
-----------------|-------------|----------
-`tctl get` | Get one or multiple resources | `tctl get users` or `tctl get user/joe`
-`tctl rm` | Delete a resource by type/name | `tctl rm user/joe`
-`tctl create` | Create a new resource from a YAML file. Use `-f` to override / update | `tctl create -f joe.yaml`
+| Command | Description | Examples |
+|---------------|-----------------------------------------------------------------------|-----------------------------------------|
+| [ `tctl get` ](../cli-docs/#tctl-get) | Get one or multiple resources | `tctl get users` or `tctl get user/joe` |
+| [ `tctl rm` ](../cli-docs/#tctl-rm) | Delete a resource by type/name | `tctl rm user/joe` |
+| [ `tctl create` ](../cli-docs/#tctl-create) | Create a new resource from a YAML file. Use `-f` to override / update | `tctl create -f joe.yaml` |
!!! warning "YAML Format":
By default Teleport uses [YAML format](https://en.wikipedia.org/wiki/YAML)
- to describe resources. YAML is a wonderful and very human-readable
- alternative to JSON or XML, but it's sensitive to white space. Pay
- attention to spaces vs tabs!
+ to describe resources. YAML is a
+ wonderful and very human-readable alternative to JSON or XML, but it's
+ sensitive to white space. Pay attention to spaces vs tabs!
-Here's an example how the YAML resource definition for a user Joe might look like.
-It can be retrieved by executing `tctl get user/joe`
+Here's an example how the YAML resource definition for a user Joe might look
+like. It can be retrieved by executing [`tctl get
+user/joe`](../cli-docs/#tctl-get)
-```yaml
+``` yaml
kind: user
version: v2
metadata:
name: joe
spec:
roles:
- - admin
+
+ - admin
+
status:
# users can be temporarily locked in a Teleport system, but this
# functionality is reserved for internal use for now.
@@ -1288,8 +1319,10 @@ spec:
# these are "allowed logins" which are usually specified as the
# last argument to `tctl users add`
logins:
+
- joe
- root
+
# any resource in Teleport can automatically expire.
expires: 0001-01-01T00:00:00Z
# for internal use only
@@ -1300,24 +1333,23 @@ spec:
```
!!! tip "Note":
- Some of the fields you will see when printing resources are used only
- internally and are not meant to be changed. Others are reserved for future
- use.
-
-Here's the list of resources currently exposed via `tctl`:
+ Some of the fields you will see when printing resources are used
+ only internally and are not meant to be changed. Others are reserved for
+ future use.
-Resource Kind | Description
-----------------|--------------
-user | A user record in the internal Teleport user DB.
-node | A registered SSH node. The same record is displayed via `tctl nodes ls`
-cluster | A trusted cluster. See [here](#trusted-clusters) for more details on connecting clusters together.
-role | A role assumed by users. The open source Teleport only includes one role: "admin", but Enterprise teleport users can define their own roles.
-connector | Authentication connectors for [single sign-on](ssh_sso) (SSO) for SAML, OIDC and Github.
+Here's the list of resources currently exposed via [ `tctl` ](../cli-docs/#tctl) :
+| Resource Kind | Description |
+|---------------|----------------------------------------------------------------------------------------------------------------------------------------------|
+| user | A user record in the internal Teleport user DB.|
+| node | A registered SSH node. The same record is displayed via `tctl nodes ls` |
+| cluster | A trusted cluster. See [here](#trusted-clusters) for more details on connecting clusters together.|
+| role | A role assumed by users. The open source Teleport only includes one role: "admin", but Enterprise teleport users can define their own roles.|
+| connector | Authentication connectors for [single sign-on](ssh_sso) (SSO) for SAML, OIDC and Github.|
**Examples:**
-```yaml
+``` yaml
# list all connectors:
$ tctl get connectors
@@ -1334,9 +1366,9 @@ $ tctl rm users/admin
## Trusted Clusters
As explained in the [architecture document](architecture/#core-concepts),
-Teleport can partition compute infrastructure into multiple clusters.
-A cluster is a group of nodes connected to the cluster's auth server,
-acting as a certificate authority (CA) for all users and nodes.
+Teleport can partition compute infrastructure into multiple clusters. A cluster
+is a group of nodes connected to the cluster's auth server, acting as a
+certificate authority (CA) for all users and nodes.
To retrieve an SSH certificate, users must authenticate with a cluster through a
proxy server. So, if users want to connect to nodes belonging to different
@@ -1347,10 +1379,11 @@ The concept of trusted clusters allows Teleport administrators to connect
multiple clusters together and establish trust between them. Trusted clusters
allow users of one cluster to seamlessly SSH into the nodes of another cluster
without having to "hop" between proxy servers. Moreover, users don't even need
-to have a direct connection to other clusters' proxy servers. Trusted clusters also have their own restrictions on user access. The user
-experience looks like this:
+to have a direct connection to other clusters' proxy servers. Trusted clusters
+also have their own restrictions on user access. The user experience looks like
+this:
-```yaml
+``` yaml
# login using the "main" cluster credentials:
$ tsh login --proxy=main.example.com
@@ -1367,10 +1400,11 @@ $ tsh clusters
### Selecting the Default Cluster
-To avoid using `--cluster` switch with `tsh` commands, you can also specify
-which trusted cluster you want to become the default from the start:
+To avoid using `--cluster` switch with [ `tsh` ](../cli-docs/#tsh) commands, you
+can also specify which trusted cluster you want to become the default from the
+start:
-```yaml
+``` yaml
# login into "main" but request "east" to be the default for subsequent
# tsh commands:
$ tsh login --proxy=main.example.com east
@@ -1382,33 +1416,55 @@ The design of trusted clusters allows Teleport users to connect to compute
infrastructure located behind firewalls without any open TCP ports. The real
world usage examples of this capability include:
-* Managed service providers (MSP) remotely managing infrastructure of their clients.
-* Device manufacturers remotely maintaining computing appliances
- deployed on premises.
-* Large cloud software vendors manage multiple data centers using a common proxy.
+* Managed service providers (MSP) remotely managing infrastructure of their
-Let's take a look at how a connection is established between the "main" cluster and the "east" cluster:
+ clients.
+
+* Device manufacturers remotely maintaining computing appliances deployed on
+
+ premises.
+
+* Large cloud software vendors manage multiple data centers using a common
+
+ proxy.
+
+Let's take a look at how a connection is established between the "main" cluster
+and the "east" cluster:

This setup works as follows:
-1. The "east" creates an outbound reverse SSH tunnel to "main" and keeps the tunnel open.
-2. **Accessibility only works in one direction.** The "east" cluster allows users from "main" to access its nodes but users in the "east" cluster can not access the "main" cluster.
-3. When a user tries to connect to a node inside "east" using main's proxy, the reverse tunnel from step 1 is used to establish this connection shown as the green line above.
+1. The "east" creates an outbound reverse SSH tunnel to "main" and keeps the
+
+ tunnel open.
+
+2. **Accessibility only works in one direction.** The "east" cluster allows
+
+ users from "main" to access its nodes but users in the "east" cluster can not
+ access the "main" cluster.
+
+3. When a user tries to connect to a node inside "east" using main's proxy, the
+
+ reverse tunnel from step 1 is used to establish this connection shown as the
+ green line above.
!!! tip "Load Balancers":
- The scheme above also works even if the "main" cluster uses multiple
- proxies behind a load balancer (LB) or a DNS entry with multiple values.
- This works by "east" establishing a tunnel to _every_ proxy in "main",
- assuming that an LB uses round-robin or a similar non-sticky balancing
- algorithm.
+ The scheme above also works even if the "main" cluster
+ uses multiple proxies behind a load balancer (LB) or a DNS entry with
+ multiple values. This works by "east" establishing a tunnel to _every_ proxy
+ in "main", assuming that an LB uses round-robin or a similar non-sticky
+ balancing algorithm.
### Example Configuration
-Connecting two clusters together is similar to [adding nodes](#adding-nodes-to-the-cluster):
+Connecting two clusters together is similar to [adding
+nodes](#adding-nodes-to-the-cluster):
+
+1. Generate an invitation token on "main" cluster, or use a pre-defined static
+
+ token.
-1. Generate an invitation token on "main" cluster, or use a pre-defined static token.
2. On the "east" side, create a trusted cluster [resource](#resources).
**Creating a Cluster Join Token**
@@ -1418,18 +1474,20 @@ in `/etc/teleport.yaml` or you can generate an auto-expiring token:
To define a static cluster join token using the configuration file on "main":
-```yaml
+``` yaml
# fragment of /etc/teleport.yaml:
auth_service:
enabled: true
tokens:
- - trusted_cluster:secret-token-to-add-new-clusters
+
+ - trusted_cluster:secret-token-to-add-new-clusters
+
```
-If you wish to use auto-expiring cluster tokens, execute this CLI command on
-the "main" side:
+If you wish to use auto-expiring cluster tokens, execute this CLI command on the
+"main" side:
-```yaml
+``` yaml
$ tctl tokens add --type=trusted_cluster
The cluster invite token: generated-token-to-add-new-clusters
```
@@ -1438,7 +1496,7 @@ The cluster invite token: generated-token-to-add-new-clusters
Now, the administrator of "east" must create the following resource file:
-```yaml
+``` yaml
# cluster.yaml
kind: trusted_cluster
version: v2
@@ -1460,13 +1518,15 @@ spec:
# the role mapping allows to map user roles from one cluster to another
# (enterprise editions of Teleport only)
role_map:
+
- remote: "admin" # users who have "admin" role on "main"
+
local: ["auditor"] # will be assigned "auditor" role when logging into "east"
```
-Then, use `tctl create` to add the file:
+Then, use [ `tctl create` ](../cli-docs/#tctl-create) to add the file:
-```yaml
+``` yaml
$ tctl create cluster.yaml
```
@@ -1474,18 +1534,23 @@ At this point the users of the main cluster should be able to see "east" in the
list of available clusters.
!!! warning "HTTPS configuration":
- If the `web_proxy_addr` endpoint of the main cluster uses a self-signed or
- invalid HTTPS certificate, you will get an error: _"the trusted cluster
- uses misconfigured HTTP/TLS certificate"_. For ease of testing the teleport
- daemon of "east" can be started with `--insecure` CLI flag to accept
- self-signed certificates. Make sure to configure HTTPS properly and remove
- the insecure flag for production use.
+ If the `web_proxy_addr` endpoint of the main
+ cluster uses a self-signed or invalid HTTPS certificate, you will get an
+ error: _"the trusted cluster uses misconfigured HTTP/TLS certificate"_. For
+ ease of testing the teleport daemon of "east" can be started with the
+ `--insecure` CLI flag to accept self-signed certificates. Make sure to
+ configure HTTPS properly and remove the insecure flag for production
+ use.
+
### Using Trusted Clusters
-As mentioned above, accessibility is only granted in one direction. So, only users from the "main" (trusted cluster) can now access nodes in the "east" (trusting cluster). Users in the "east" cluster will not be able to access the "main" cluster.
+As mentioned above, accessibility is only granted in one direction. So, only
+users from the "main" (trusted cluster) can now access nodes in the "east"
+(trusting cluster). Users in the "east" cluster will not be able to access the
+"main" cluster.
-```bsh
+``` bsh
# login into the main cluster:
$ tsh --proxy=proxy.main login joe
@@ -1511,30 +1576,35 @@ $ tsh ssh --cluster=east root@db1.east
### Disabling Trust
-To temporarily disable trust between clusters, i.e. to disconnect the "east"
+To temporarily disable trust between clusters, i.e. to disconnect the "east"
cluster from "main", edit the YAML definition of the trusted cluster resource
and set `enabled` to "false", then update it:
-```yaml
+``` yaml
$ tctl create --force cluster.yaml
```
If you want to _permanently_ disconnect one cluster from the other:
-```yaml
+``` yaml
# execute this command on "main" side to disconnect "east":
$ tctl rm tc/east
```
-While accessibility is only granted in one direction, trust is granted in both directions. If you remote "east" from "main", the following will happen:
+While accessibility is only granted in one direction, trust is granted in both
+directions. If you remove "east" from "main", the following will happen:
+
+* Two clusters will be disconnected, because "main" will drop the inbound SSH
+  tunnel connection from "east" and will not allow a new one.
+
-* Two clusters will be disconnected, because "main" will drop the inbound SSH tunnel connection from "east" and will not allow a new one.
* "main" will stop trusting certificates issued by "east".
* "east" will continue to trust certificates issued by "main".
-If you wish to permanently remove all trust relationships and the connections between both clusters:
+If you wish to permanently remove all trust relationships and the connections
+between both clusters:
-```yaml
+``` yaml
# execute on "main":
$ tctl rm tc/east
# execute on "east":
@@ -1547,16 +1617,18 @@ Take a look at [Trusted Clusters Guide](trustedclusters) to learn more about
advanced topics:
* Using dynamic cluster join tokens instead of pre-defined static tokens for
+
enhanced security.
+
* Defining role-mapping between clusters (Teleport Enterprise only).
## Github OAuth 2.0
Teleport supports authentication and authorization via external identity
-providers such as Github. First, the Teleport auth service must be configured
-to use Github for authentication:
+providers such as Github. First, the Teleport auth service must be configured to
+use Github for authentication:
-```yaml
+``` yaml
# snippet from /etc/teleport.yaml
auth_service:
authentication:
@@ -1565,7 +1637,7 @@ auth_service:
Next step is to define a Github connector:
-```yaml
+``` yaml
# Create a file called github.yaml:
kind: github
version: v3
@@ -1583,10 +1655,13 @@ spec:
redirect_url: https:///v1/webapi/github/callback
# mapping of org/team memberships onto allowed logins and roles
teams_to_logins:
+
- organization: octocats # Github organization name
+
team: admins # Github team name within that organization
# allowed logins for users in this org/team
logins:
+
- root
# List of Kubernetes groups this Github team is allowed to connect to
@@ -1595,34 +1670,36 @@ spec:
```
!!! note
- For open-source Teleport the `logins` field contains a list of allowed OS
- logins. For the commercial Teleport Enterprise offering, which supports
+ For open-source Teleport the `logins` field contains a list of allowed
+ OS logins. For the commercial Teleport Enterprise offering, which supports
role-based access control, the same field is treated as a list of _roles_
that users from the matching org/team assume after going through the
authorization flow.
-To obtain client ID and client secret, please follow Github documentation
-on how to [create and register an OAuth app](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/).
-Be sure to set the "Authorization callback URL" to the same value as `redirect_url`
-in the resource spec.
+To obtain client ID and client secret, please follow Github documentation on how
+to [create and register an OAuth
+app](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/).
+Be sure to set the "Authorization callback URL" to the same value as
+`redirect_url` in the resource spec.
-Finally, create the connector using `tctl` [resource](#resources) management command:
+Finally, create the connector using [ `tctl` ](../cli-docs/#tctl)
+[resource](#resources) management command:
-```yaml
+``` yaml
$ tctl create github.yaml
```
!!! tip
When going through the Github authentication flow for the first time,
- the application must be granted the access to all organizations that
- are present in the "teams to logins" mapping, otherwise Teleport will
- not be able to determine team memberships for these orgs.
+ the application must be granted the access to all organizations that are
+ present in the "teams to logins" mapping, otherwise Teleport will not be
+ able to determine team memberships for these orgs.
## HTTP CONNECT Proxies
Some networks funnel all connections through a proxy server where they can be
-audited and access control rules are applied. For these scenarios Teleport supports
-HTTP CONNECT tunneling.
+audited and access control rules are applied. For these scenarios Teleport
+supports HTTP CONNECT tunneling.
To use HTTP CONNECT tunneling, simply set either the `HTTPS_PROXY` or
`HTTP_PROXY` environment variables and when Teleport builds and establishes the
@@ -1632,13 +1709,15 @@ Specifically, if using the default configuration, Teleport will tunnel ports
proxy.
The value of `HTTPS_PROXY` or `HTTP_PROXY` should be in the format
-`scheme://host:port` where scheme is either `https` or `http`. If the
-value is `host:port`, Teleport will prepend `http`.
+`scheme://host:port` where scheme is either `https` or `http` . If the value is
+`host:port` , Teleport will prepend `http` .
-It's important to note that in order for Teleport to use HTTP CONNECT tunnelling, the `HTTP_PROXY` and `HTTPS_PROXY`
-environment variables must be set within Teleport's environment. You can also optionally set the `NO_PROXY` environment
-variable to avoid use of the proxy when accessing specified hosts/netmasks. When launching Teleport with systemd, this
-will probably involve adding some lines to your systemd unit file:
+It's important to note that in order for Teleport to use HTTP CONNECT
+tunnelling, the `HTTP_PROXY` and `HTTPS_PROXY` environment variables must be set
+within Teleport's environment. You can also optionally set the `NO_PROXY`
+environment variable to avoid use of the proxy when accessing specified
+hosts/netmasks. When launching Teleport with systemd, this will probably involve
+adding some lines to your systemd unit file:
```
[Service]
@@ -1648,18 +1727,19 @@ Environment="NO_PROXY=localhost,127.0.0.1,192.168.0.0/16,172.16.0.0/12,10.0.0.0/
```
!!! tip "Note":
- `localhost` and `127.0.0.1` are invalid values for the proxy host. If for
- some reason your proxy runs locally, you'll need to provide some other DNS
- name or a private IP address for it.
+ `localhost` and `127.0.0.1` are invalid values for the proxy
+ host. If for some reason your proxy runs locally, you'll need to provide
+ some other DNS name or a private IP address for it.
## PAM Integration
-Teleport SSH daemon can be configured to integrate with [PAM](https://en.wikipedia.org/wiki/Linux_PAM).
-This allows Teleport to create user sessions using PAM session profiles.
+Teleport SSH daemon can be configured to integrate with
+[PAM](https://en.wikipedia.org/wiki/Linux_PAM). This allows Teleport to create
+user sessions using PAM session profiles.
To enable PAM on a given Linux machine, update `/etc/teleport.yaml` with:
-```yaml
+``` yaml
teleport:
ssh_service:
pam:
@@ -1675,61 +1755,68 @@ removed if you uninstall `openssh-server` package. We recommend creating your
own PAM service file like `/etc/pam.d/teleport` and specifying it as
`service_name` above.
-
## Using Teleport with OpenSSH
Teleport is a standards-compliant SSH proxy and it can work in environments with
existing SSH implementations, such as OpenSSH. This section will cover:
-* Configuring OpenSSH client `ssh` to login into nodes inside a Teleport cluster.
+* Configuring OpenSSH client `ssh` to login into nodes inside a Teleport
+
+ cluster.
+
* Configuring OpenSSH server `sshd` to join a Teleport cluster.
### Using OpenSSH Client
-It is possible to use the OpenSSH client `ssh` to connect to nodes within a Teleport
-cluster. Teleport supports SSH subsystems and includes a `proxy` subsystem that
-can be used like `netcat` is with `ProxyCommand` to connect through a jump host.
+It is possible to use the OpenSSH client `ssh` to connect to nodes within a
+Teleport cluster. Teleport supports SSH subsystems and includes a `proxy`
+subsystem that can be used like `netcat` is with `ProxyCommand` to connect
+through a jump host.
-First, you need to export the public keys of cluster members. This has to be done
-on a node which runs Teleport auth server:
+First, you need to export the public keys of cluster members. This has to be
+done on a node which runs Teleport auth server:
-```bash
+``` bash
$ tctl auth export --type=host > cluster_node_keys
```
-```bash
+``` bash
$ cat cluster_node_keys
@cert-authority *.graviton-auth ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLNduBoHQaqi+kgkq3gLYjc6JIyBBnCFLgm63b5rtmWl/CJD7T9HWHxZphaS1jra6CWdboLeTp6sDUIKZ/Qw1MKFlfoqZZ8k6to43bxx7DvAHs0Te4WpuS/YRmWFhb6mMVOa8Rd4/9jE+c0f9O/t7X4m5iR7Fp7Tt+R/pjJfr03Loi6TYP/61AgXD/BkVDf+IcU4+9nknl+kaVPSGcPS9/Vbni1208Q+VN7B7Umy71gCh02gfv3rBGRgjT/cRAivuVoH/z3n5UwWg+9R3GD/l+XZKgv+pfe3OHoyDFxYKs9JaX0+GWc504y3Grhos12Lb8sNmMngxxxQ/KUDOV9z+R type=host
```
-!!! tip "Note":
- When sharing the @cert-authority make sure that the URL for the proxy is correct.
- In the above example, `*.graviton-auth` should be changed to teleport.example.com.
+!!! tip "Note":
+ When sharing the @cert-authority make sure that the URL for the
+ proxy is correct. In the above example, `*.graviton-auth` should be changed to
+ teleport.example.com.
-On your client machine, you need to import these keys. It will allow your OpenSSH client
-to verify that host's certificates are signed by the trusted CA key:
+On your client machine, you need to import these keys. It will allow your
+OpenSSH client to verify that host's certificates are signed by the trusted CA
+key:
-```yaml
+``` yaml
$ cat cluster_node_keys >> ~/.ssh/known_hosts
```
-Make sure you are running OpenSSH's `ssh-agent`, and have logged in to the Teleport proxy:
+Make sure you are running OpenSSH's `ssh-agent` , and have logged in to the
+Teleport proxy:
-```yaml
+``` yaml
$ eval `ssh-agent`
$ tsh --proxy=work.example.com login
```
-`ssh-agent` will print environment variables into the console. Either `eval` the output
-as in the example above, or copy and paste the output into the shell you will be using to
-connect to a Teleport node. The output exports the `SSH_AUTH_SOCK` and `SSH_AGENT_PID`
-environment variables that allow OpenSSH clients to find the SSH agent.
+`ssh-agent` will print environment variables into the console. Either `eval` the
+output as in the example above, or copy and paste the output into the shell you
+will be using to connect to a Teleport node. The output exports the
+`SSH_AUTH_SOCK` and `SSH_AGENT_PID` environment variables that allow OpenSSH
+clients to find the SSH agent.
Lastly, configure the OpenSSH client to use the Teleport proxy when connecting
to nodes with matching names. Edit `~/.ssh/config` for your user or
`/etc/ssh/ssh_config` for global changes:
-```bsh
+``` bsh
# work.example.com is the jump host (proxy). credentials will be obtained from the
# openssh agent.
Host work.example.com
@@ -1753,39 +1840,43 @@ Host *.remote-cluster.example.com
```
When everything is configured properly, you can use ssh to connect to any node
-behind `work.example.com`:
+behind `work.example.com` :
-```bsh
+``` bsh
$ ssh root@database.work.example.com
```
!!! tip "NOTE":
- Teleport uses OpenSSH certificates instead of keys which means you can not connect
- to a Teleport node by IP address. You have to connect by DNS name. This is because
- OpenSSH ensures the DNS name of the node you are connecting is listed under
- the `Principals` section of the OpenSSH certificate to verify you are connecting
- to the correct node.
+ Teleport uses OpenSSH certificates instead of keys which means
+ you can not connect to a Teleport node by IP address. You have to connect by
+ DNS name. This is because OpenSSH ensures the DNS name of the node you are
+ connecting is listed under the `Principals` section of the OpenSSH
+ certificate to verify you are connecting to the correct node.
### Integrating with OpenSSH Servers
-Existing `sshd` servers can be added to a Teleport cluster. For that to work, you
-have to configure `sshd` to trust the Teleport CA.
+Existing `sshd` servers can be added to a Teleport cluster. For that to work,
+you have to configure `sshd` to trust the Teleport CA.
Export the Teleport CA certificate into a file:
-```bsh
+``` bsh
$ tctl auth export --type=user > teleport-user-ca.pub
```
To allow access per-user, append the contents of `teleport-user-ca.pub` to
- `~/.ssh/authorized_keys`.
+`~/.ssh/authorized_keys` .
To allow access for all users:
- * Edit `teleport-user-ca.pub` and remove `cert-authority` from the start of line.
- * Copy `teleport-user-ca.pub` to `/etc/ssh/teleport-user-ca.pub`
- * Update `sshd` configuration (usually `/etc/ssh/sshd_config`) to point to this
- file: `TrustedUserCAKeys /etc/ssh/teleport-user-ca.pub`
+ * Edit `teleport-user-ca.pub` and remove `cert-authority` from the start of
+   the line.
+ * Copy `teleport-user-ca.pub` to `/etc/ssh/teleport-user-ca.pub`
+ * Update `sshd` configuration (usually `/etc/ssh/sshd_config` ) to point to
+   this file: `TrustedUserCAKeys /etc/ssh/teleport-user-ca.pub`
+
+
+
## Certificate Rotation
@@ -1796,7 +1887,7 @@ will show you how to implement certificate rotation in practice.
The easiest way to start the rotation is to execute this command on a cluster's
_auth server_:
-```bsh
+``` bsh
$ tctl auth rotate
```
@@ -1805,7 +1896,7 @@ period_ of 48 hours.
This can be customized, i.e.
-```bsh
+``` bsh
# rotate only user certificates with a grace period of 200 hours:
$ tctl auth rotate --type=user --grace-period=200h
@@ -1818,58 +1909,65 @@ needs to be notified that a rotation is taking place and request a new
certificate for itself before the grace period ends.
!!! warning "Warning":
- Be careful when choosing a grace period when rotating host certificates.
- The grace period needs to be long enough for all nodes in a cluster to
- request a new certificate. If some nodes go offline during the rotation and
- come back only after the grace period has ended, they will be forced to
- leave the cluster, i.e. users will no longer be allowed to SSH into
- them.
+ Be careful when choosing a grace period when rotating
+ host certificates. The grace period needs to be long enough for all nodes in
+ a cluster to request a new certificate. If some nodes go offline during the
+ rotation and come back only after the grace period has ended, they will be
+ forced to leave the cluster, i.e. users will no longer be allowed to SSH
+ into them.
To check the status of certificate rotation:
-```bsh
+``` bsh
$ tctl status
```
!!! danger "Version Warning":
- Certificate rotation can only be used with clusters running version 2.6 of
- Teleport or newer. If trusted clusters are used, make sure _all_ connected
- clusters are running version 2.6+. If one of the trusted clusters is running
- an older version of Teleport the trust/connection to that cluster will be
- lost.
+ Certificate rotation can only be used with
+ clusters running version 2.6 of Teleport or newer. If trusted clusters are
+ used, make sure _all_ connected clusters are running version 2.6+. If one of
+ the trusted clusters is running an older version of Teleport the
+ trust/connection to that cluster will be lost.
!!! warning "CA Pinning Warning"
- If you are using [CA Pinning](#untrusted-auth-servers) when adding new nodes,
- the CA pin will changes after the rotation.
+ If you are using [CA Pinning](#untrusted-auth-servers)
+ when adding new nodes, the CA pin will
+ change after the rotation.
## Ansible Integration
-Ansible uses the OpenSSH client by default. This makes it compatible with Teleport without any extra work, except configuring OpenSSH client to work with Teleport Proxy:
+Ansible uses the OpenSSH client by default. This makes it compatible with
+Teleport without any extra work, except configuring OpenSSH client to work with
+Teleport Proxy:
* configure your OpenSSH to connect to Teleport proxy and use `ssh-agent` socket
-* enable scp mode in the Ansible config file (default is `/etc/ansible/ansible.cfg`):
+* enable scp mode in the Ansible config file (default is
+  `/etc/ansible/ansible.cfg` ):
+
-```bsh
+``` bsh
scp_if_ssh = True
```
## Kubernetes Integration
-Teleport 3.0+ can be configured as a compliance gateway for Kubernetes
-clusters. This allows users to authenticate against a Teleport proxy using
-`tsh login` command to retrieve credentials for both SSH and Kubernetes API.
+Teleport 3.0+ can be configured as a compliance gateway for Kubernetes clusters.
+This allows users to authenticate against a Teleport proxy using [`tsh
+login`](../cli-docs/#tsh) command to retrieve credentials for both SSH and
+Kubernetes API.
-Below is a high-level diagram of how Teleport can be deployed in front of
-a Kubernetes cluster:
+Below is a high-level diagram of how Teleport can be deployed in front of a
+Kubernetes cluster:

-For more detailed information, please take a look at [Kubernetes Integration with SSH](architecture.md#kubernetes-integration)
-section in the Architecture chapter.
+For more detailed information, please take a look at [Kubernetes Integration
+with SSH](architecture.md#kubernetes-integration) section in the Architecture
+chapter.
In the scenario illustrated above a user would execute the following commands:
-```bsh
+``` bsh
# Authentication step to retrieve the certificates. tsh login places the SSH
# certificate into `~/.tsh` as usual and updates kubeconfig with Kubernetes
# credentials:
@@ -1884,9 +1982,10 @@ $ kubectl get pods
### Kubernetes/Teleport Configuration
-To enable the Kubernetes integration, first configure the Teleport proxy service as follows:
+To enable the Kubernetes integration, first configure the Teleport proxy service
+as follows:
-```yaml
+``` yaml
# snippet from /etc/teleport.yaml on the Teleport proxy service:
proxy_service:
# create the 'kubernetes' section and set enabled to "yes" (it's "no" by default):
@@ -1902,92 +2001,105 @@ proxy_service:
kubeconfig_file: /path/to/kubeconfig
```
-To make this work, the Teleport proxy server must be able to access a
-Kubernetes API endpoint. This can be done either by:
+To make this work, the Teleport proxy server must be able to access a Kubernetes
+API endpoint. This can be done either by:
* Deploying the proxy service inside a Kubernetes pod.
-* Deploying the proxy service outside Kubernetes adding a valid `kubeconfig` setting to the configuration file as shown above.
+* Deploying the proxy service outside Kubernetes adding a valid `kubeconfig`
-When adding new local users you have to specify which Kubernetes groups they belong to:
+ setting to the configuration file as shown above.
-```bsh
+When adding new local users you have to specify which Kubernetes groups they
+belong to:
+
+``` bsh
$ tctl users add joe --k8s-groups="system:masters"
```
-If using Teleport Community SSO with Github, Kubernetes groups can be assigned to Github teams with a
-Teleport connector. See example above in [Github OAuth 2.0 Example](#github-oauth-20) for more
-information on how to setup Github SSO with Teleport.
+If using Teleport Community SSO with Github, Kubernetes groups can be assigned
+to Github teams with a Teleport connector. See example above in [Github OAuth
+2.0 Example](#github-oauth-20) for more information on how to setup Github SSO
+with Teleport.
-If using Teleport Enterprise SSO with enterprise-grade identity providers (using SAML, OIDC and Active Directory),
-`kubernetes_groups` are assigned to Teleport Roles as shown in the Teleport Enterprise [RBAC](ssh_rbac.md#roles)
-section.
+If using Teleport Enterprise SSO with enterprise-grade identity providers (using
+SAML, OIDC and Active Directory), `kubernetes_groups` are assigned to Teleport
+Roles as shown in the Teleport Enterprise [RBAC](ssh_rbac.md#roles) section.
-You may also find it useful to read our [Kubernetes guide](kubernetes_ssh.md) which contains some more specific examples
-and instructions.
+You may also find it useful to read our [Kubernetes guide](kubernetes_ssh.md)
+which contains some more specific examples and instructions.
### Multiple Kubernetes Clusters
-You can take advantage of the [Trusted Clusters](#trusted-clusters) feature
-of Teleport to federate trust across multiple Kubernetes clusters.
+You can take advantage of the [Trusted Clusters](#trusted-clusters) feature of
+Teleport to federate trust across multiple Kubernetes clusters.
When multiple trusted clusters are present behind a Teleport proxy, the
-`kubeconfig` generated by `tsh login` will contain the Kubernetes API endpoint
-determined by the `` argument to `tsh login`.
+`kubeconfig` generated by [ `tsh login` ](../cli-docs/#tsh-login) will contain the
+Kubernetes API endpoint determined by the `<cluster-name>` argument to
+[`tsh login`](../cli-docs/#tsh-login).
+
+* There are three Teleport/Kubernetes clusters: "main", "east" and "west". These
+  are the names set in `cluster_name` setting in their configuration files.
+
-* There are three Teleport/Kubernetes clusters: "main", "east" and "west".
- These are the names set in `cluster_name` setting in their configuration
- files.
* The clusters "east" and "west" are trusted clusters for "main".
-* Users always authenticate against "main" but use their certificates to
- access SSH nodes and Kubernetes API in all three clusters.
+* Users always authenticate against "main" but use their certificates to access
+  SSH nodes and Kubernetes API in all three clusters.
+
+
* The DNS name of the main proxy server is "main.example.com"
In this scenario, users usually login using this command:
-```bsh
+``` bsh
# Using login without arguments
$ tsh --proxy=main.example.com login
# user's `kubeconfig` now contains one entry for the main Kubernetes
-# endpoint, i.e. `proxy.example.com`.
+# endpoint, i.e. `proxy.example.com` .
# Receive a certificate for "east":
$ tsh --proxy=main.example.com login east
# user's `kubeconfig` now contains the entry for the "east" Kubernetes
-# endpoint, i.e. `east.proxy.example.com`.
+# endpoint, i.e. `east.proxy.example.com` .
```
## High Availability
!!! tip "Tip":
- Before continuing, please make sure to take a look at the [Cluster State section](architecture/#cluster-state)
- in the Teleport Architecture documentation.
+ Before continuing, please make sure to take a look at the
+ [Cluster State section](architecture/#cluster-state) in the Teleport
+ Architecture documentation.
Usually there are two ways to achieve high availability. You can "outsource"
this function to the infrastructure. For example, using a highly available
network-based disk volumes (similar to AWS EBS) and by migrating a failed VM to
a new host. In this scenario, there's nothing Teleport-specific to be done.
-If high availability cannot be provided by the infrastructure (perhaps
-you're running Teleport on a bare metal cluster), you can still configure Teleport
-to run in a highly available fashion.
+If high availability cannot be provided by the infrastructure (perhaps you're
+running Teleport on a bare metal cluster), you can still configure Teleport to
+run in a highly available fashion.
### Auth Server HA
-In order to run multiple instances of Teleport Auth Server, you must switch to a highly available secrets back-end first.
-Also, you must tell each node in a cluster that there is
-more than one auth server available. There are two ways to do this:
+In order to run multiple instances of Teleport Auth Server, you must switch to a
+highly available secrets back-end first. Also, you must tell each node in a
+cluster that there is more than one auth server available. There are two ways to
+do this:
+
+ * Use a load balancer to create a single auth API access point (AP) and
+   specify this AP in `auth_servers` section of Teleport configuration for
+   all nodes in a cluster. This load balancer should do TCP level
+   forwarding.
+
+ * If a load balancer is not an option, you must specify each instance of an
- * Use a load balancer to create a single the auth API access point (AP) and
- specify this AP in `auth_servers` section of Teleport configuration for
- all nodes in a cluster. This load balancer should do TCP level forwarding.
- * If a load balancer is not an option, you must specify each instance of an
auth server in `auth_servers` section of Teleport configuration.
**IMPORTANT:** with multiple instances of the auth servers running, special
attention needs to be paid to keeping their configuration identical. Settings
-like `cluster_name`, `tokens`, `storage`, etc must be the same.
+like `cluster_name` , `tokens` , `storage` , etc must be the same.
### Teleport Proxy HA
@@ -1996,22 +2108,27 @@ If using the [default configuration](#ports), configure your load balancer to
forward ports `3023` and `3080` to the servers that run the Teleport proxy. If
you have configured your proxy to use non-default ports, you will need to
configure your load balancer to forward the ports you specified for
-`listen_addr` and `web_listen_addr` in `teleport.yaml`. The load balancer for
-`web_listen_addr` can terminate TLS with your own certificate that is valid
-for your users, while the remaining ports should do TCP level forwarding, since
+`listen_addr` and `web_listen_addr` in `teleport.yaml` . The load balancer for
+`web_listen_addr` can terminate TLS with your own certificate that is valid for
+your users, while the remaining ports should do TCP level forwarding, since
Teleport will handle its own SSL on top of that with its own certificates.
!!! tip "NOTE":
- If you terminate TLS with your own certificate at a load balancer you'll need
- to Teleport with `--insecure`
+ If you terminate TLS with your own certificate at a load
+ balancer you'll need to run Teleport with `--insecure`
If your load balancer supports health checks, configure it to hit the
`/webapi/ping` endpoint on the proxy. This endpoint will reply `200 OK` if the
proxy is running without problems.
!!! tip "NOTE":
- As the new auth servers get added to the cluster and the old servers get decommissioned, nodes and proxies will refresh the list of available auth servers and store it in their local cache `/var/lib/teleport/authservers.json`. The values from the cache file will take precedence over the configuration
- file.
+ As the new auth servers get added to the cluster and the old
+ servers get decommissioned, nodes and proxies will refresh the list of
+ available auth servers and store it in their local cache
+ `/var/lib/teleport/authservers.json` . The values from the cache file will
+ take precedence over the configuration file.
+
+
We'll cover how to use `etcd` and `DynamoDB` storage back-ends to make Teleport
highly available below.
@@ -2019,21 +2136,27 @@ highly available below.
### Using etcd
Teleport can use [etcd](https://coreos.com/etcd/) as a storage backend to
-achieve highly available deployments. You must take steps to protect access
-to `etcd` in this configuration because that is where Teleport secrets like
-keys and user records will be stored.
+achieve highly available deployments. You must take steps to protect access to
+`etcd` in this configuration because that is where Teleport secrets like keys
+and user records will be stored.
To configure Teleport for using etcd as a storage back-end:
* Make sure you are using **etcd version 3.3** or newer.
-* Install etcd and configure peer and client TLS authentication using the
- [etcd security guide](https://coreos.com/etcd/docs/latest/security.html).
+* Install etcd and configure peer and client TLS authentication using the
+  [etcd security guide](https://coreos.com/etcd/docs/latest/security.html).
+
+
* Configure all Teleport Auth servers to use etcd in the "storage" section of
+
the config file as shown below.
+
* Deploy several auth servers connected to etcd back-end.
-* Deploy several proxy nodes that have `auth_servers` pointed to list of auth servers to connect to.
+* Deploy several proxy nodes that have `auth_servers` pointed to list of auth
-```yaml
+ servers to connect to.
+
+``` yaml
teleport:
storage:
type: etcd
@@ -2060,36 +2183,34 @@ teleport:
### Using Amazon S3
!!! tip "Tip":
- Before continuing, please make sure to take a look at the [cluster state section](architecture/#cluster-state)
- in Teleport Architecture documentation.
+ Before continuing, please make sure to take a look at the
+ [cluster state section](architecture/#cluster-state) in Teleport
+ Architecture documentation.
!!! tip "AWS Authentication":
- The configuration examples below contain AWS access keys and secret keys. They are optional,
- they exist for your convenience but we DO NOT RECOMMEND using them in
- production. If Teleport is running on an AWS instance it will automatically
- use the instance IAM role. Teleport also will pick up AWS credentials from
- the `~/.aws` folder, just like the AWS CLI tool.
-
-S3 buckets can only be used as a storage for the recorded sessions. S3 cannot store
-the audit log or the cluster state. Below is an example of how to configure a Teleport
-auth server to store the recorded sessions in an S3 bucket.
-
-
-```yaml
+ The configuration examples below contain AWS
+ access keys and secret keys. They are optional, they exist for your
+ convenience but we DO NOT RECOMMEND using them in production. If Teleport is
+ running on an AWS instance it will automatically use the instance IAM role.
+ Teleport also will pick up AWS credentials from the `~/.aws` folder, just
+ like the AWS CLI tool.
+
+S3 buckets can only be used as a storage for the recorded sessions. S3 cannot
+store the audit log or the cluster state. Below is an example of how to
+configure a Teleport auth server to store the recorded sessions in an S3 bucket.
+
+``` yaml
teleport:
storage:
- # The region setting sets the default AWS region for all AWS services
+ # The region setting sets the default AWS region for all AWS services
# Teleport may consume (DynamoDB, S3)
- region: us-west-1
+ region: us-east-1
- # Path to S3 bucket to store the recorded sessions in. The optional 'region'
- # parameter allows to override the region setting above, keeping S3 recordings
- # in a different region:
- audit_sessions_uri: s3://example.com/path/to/bucket?region=us-east-1
+ # Path to S3 bucket to store the recorded sessions in.
+ audit_sessions_uri: "s3://Example_TELEPORT_S3_BUCKET/records"
- # Authentication settings are optional (see below)
- access_key: BKZA3H2LOKJ1QJ3YF21A
- secret_key: Oc20333k293SKwzraT3ah3Rv1G3/97POQb3eGziSZ
+ # Teleport assumes credentials. Using provider chains, assuming IAM role or
+ # standard .aws/credentials in the home folder.
```
The AWS authentication settings above can be omitted if the machine itself is
@@ -2098,72 +2219,81 @@ running on an EC2 instance with an IAM role.
### Using DynamoDB
!!! tip "Tip":
- Before continuing, please make sure to take a look at the [cluster state section](architecture/#cluster-state)
- in Teleport Architecture documentation.
+ Before continuing, please make sure to take a look at the
+ [cluster state section](architecture/#cluster-state) in Teleport
+ Architecture documentation.
-If you are running Teleport on AWS, you can use [DynamoDB](https://aws.amazon.com/dynamodb/)
-as a storage back-end to achieve high availability. DynamoDB back-end supports two types
-of Teleport data:
+If you are running Teleport on AWS, you can use
+[DynamoDB](https://aws.amazon.com/dynamodb/) as a storage back-end to achieve
+high availability. DynamoDB back-end supports two types of Teleport data:
* Cluster state
* Audit log events
-DynamoDB cannot store the recorded sessions. You are advised to use AWS S3 for that as shown above.
-To configure Teleport to use DynamoDB:
+DynamoDB cannot store the recorded sessions. You are advised to use AWS S3 for
+that as shown above. To configure Teleport to use DynamoDB:
* Make sure you have AWS access key and a secret key which give you access to
- DynamoDB account. If you're using (as recommended) an IAM role for this, the policy
- with necessary permissions is listed below.
-* Configure all Teleport Auth servers to use DynamoDB back-end in the "storage" section
- of `teleport.yaml` as shown below.
+
+ DynamoDB account. If you're using (as recommended) an IAM role for this, the
+ policy with necessary permissions is listed below.
+
+* Configure all Teleport Auth servers to use DynamoDB back-end in the "storage"
+
+ section of `teleport.yaml` as shown below.
+
* Deploy several auth servers connected to DynamoDB storage back-end.
* Deploy several proxy nodes.
-* Make sure that all Teleport nodes have `auth_servers` configuration setting populated with the auth servers.
+* Make sure that all Teleport nodes have `auth_servers` configuration setting
-```yaml
+ populated with the auth servers.
+
+``` yaml
teleport:
storage:
type: dynamodb
- region: eu-west-1
+ # Region location of dynamodb instance, https://docs.aws.amazon.com/en_pv/general/latest/gr/rande.html#ddb_region
+ region: us-east-1
# Name of the DynamoDB table. If it does not exist, Teleport will create it.
- table_name: teleport_table
+ table_name: Example_TELEPORT_DYNAMO_TABLE_NAME
# Authentication settings are optional (see below)
access_key: BKZA3H2LOKJ1QJ3YF21A
secret_key: Oc20333k293SKwzraT3ah3Rv1G3/97POQb3eGziSZ
- # This setting configures Teleport to send the audit events to three places:
- # To keep a copy on a local filesystem, in DynamoDB and to Stdout.
+ # This setting configures Teleport to send the audit events to three places:
+ # To keep a copy on a local filesystem, in DynamoDB and to Stdout.
audit_events_uri: ['file:///var/lib/teleport/audit/events', 'dynamodb://table_name', 'stdout://']
# This setting configures Teleport to save the recorded sessions in an S3 bucket:
- audit_sessions_uri: 's3://example.com/teleport.events'
+ audit_sessions_uri: s3://Example_TELEPORT_S3_BUCKET/records
```
-* Replace `region` and `table_name` with your own settings. Teleport will
- create the table automatically.
+* Replace `us-east-1` and `Example_TELEPORT_DYNAMO_TABLE_NAME` with your own settings. Teleport will create the table automatically.
+
* The AWS authentication setting above can be omitted if the machine itself is
running on an EC2 instance with an IAM role.
+
* Audit log settings above are optional. If specified, Teleport will store the
audit log in DynamoDB and the session recordings **must** be stored in an S3
- bucket, i.e. both `audit_xxx` settings must be present. If they are not set,
+ bucket, i.e. both `audit_xxx` settings must be present. If they are not set,
Teleport will default to a local file system for the audit log, i.e.
- `/var/lib/teleport/log` on an auth server.
-* If DynamoDB is used for the audit log, the logged events will be stored with
- a TTL of 1 year. Currently this TTL is not configurable.
+`/var/lib/teleport/log` on an auth server.
-!!! warning "Access to DynamoDB":
- Make sure that the IAM role assigned to Teleport is configured with the
- sufficient access to DynamoDB. Below is the example of the IAM policy you
- can use:
+* If DynamoDB is used for the audit log, the logged events will be stored with a
+ TTL of 1 year. Currently this TTL is not configurable.
-```js
+!!! warning "Access to DynamoDB":
+ Make sure that the IAM role assigned to
+ Teleport is configured with the sufficient access to DynamoDB. Below is the
+ example of the IAM policy you can use:
+
+``` js
{
"Version": "2012-10-17",
- "Statement": [
- {
+ "Statement": [{
"Sid": "AllAPIActionsOnTeleportAuth",
"Effect": "Allow",
"Action": "dynamodb:*",
@@ -2181,16 +2311,20 @@ teleport:
## Upgrading Teleport
-Teleport is always a critical component of the infrastructure it runs on. This is why upgrading to a new version must be performed with caution.
+Teleport is always a critical component of the infrastructure it runs on. This
+is why upgrading to a new version must be performed with caution.
-Teleport is a much more capable system than a bare bones SSH server.
-While it offers significant benefits on a cluster level, it also adds some complexity to cluster upgrades. To ensure robust operation Teleport administrators must follow the upgrade rules listed below.
+Teleport is a much more capable system than a bare bones SSH server. While it
+offers significant benefits on a cluster level, it also adds some complexity to
+cluster upgrades. To ensure robust operation Teleport administrators must follow
+the upgrade rules listed below.
### Production Releases
First of all, avoid running pre-releases (release candidates) in production
-environments. Teleport development team uses [Semantic Versioning](https://semver.org/)
-which makes it easy to tell if a specific version is recommended for production use.
+environments. Teleport development team uses [Semantic
+Versioning](https://semver.org/) which makes it easy to tell if a specific
+version is recommended for production use.
### Component Compatibility
@@ -2198,26 +2332,48 @@ When running multiple binaries of Teleport within a cluster (nodes, proxies,
clients, etc), the following rules apply:
* Patch versions are always compatible, for example any 4.0.1 component will
+
work with any 4.0.3 component.
+
* Other versions are always compatible with their **previous** release. This
+
means you must not attempt to upgrade from 3.3 straight to 3.5. You must
upgrade to 3.4 first.
-* Teleport clients (`tsh` for users and `tctl` for admins) may not be compatible if older than the auth or the proxy server. They will print an error if there is an incompatibility.
-* While 4.0 is a major release. 3.2 can be upgraded to 4.0 using the same upgrade sequence below.
+
+* Teleport clients ( [ `tsh` ](../cli-docs/#tsh) for users and
+
+ [ `tctl` ](../cli-docs/#tctl) for admins) may not be compatible
+
+ if older than the auth or the proxy server. They will print an error if there
+ is an incompatibility.
+
+* While 4.0 is a major release, 3.2 can be upgraded to 4.0 using the same
+
+ upgrade sequence below.
### Upgrade Sequence
When upgrading a single Teleport cluster:
-1. **Upgrade the auth server first**. The auth server keeps the cluster state and
- if there are data format changes introduced in the new version this will
+1. **Upgrade the auth server first**. The auth server keeps the cluster state
+
+ and if there are data format changes introduced in the new version this will
perform necessary migrations.
-2. Then, upgrade the proxy servers. The proxy servers are stateless and can be upgraded
- in any sequence or at the same time.
+
+2. Then, upgrade the proxy servers. The proxy servers are stateless and can be
+
+ upgraded in any sequence or at the same time.
+
3. Finally, upgrade the SSH nodes in any sequence or at the same time.
!!! warning "Warning":
- If several auth servers are running in HA configuration (for example, in AWS auto-scaling group) you have to shrink the group to **just one auth server** prior to performing an upgrade. While Teleport will attempt to perform any necessary migrations, we recommend users create a backup of their backend before upgrading the Auth Server, as a precaution. This allows for a safe rollback in case the migration itself fails.
+ If several auth servers are running in HA configuration
+ (for example, in AWS auto-scaling group) you have to shrink the group to
+ **just one auth server** prior to performing an upgrade. While Teleport
+ will attempt to perform any necessary migrations, we recommend users
+ create a backup of their backend before upgrading the Auth Server, as a
+ precaution. This allows for a safe rollback in case the migration itself
+ fails.
When upgrading multiple clusters:
@@ -2226,75 +2382,91 @@ When upgrading multiple clusters:
### Daemon Restarts
-As covered in the [Graceful Restarts](#graceful-restarts) section, Teleport supports
-graceful restarts. To upgrade a host to a newer Teleport version, an administrator must:
+As covered in the [Graceful Restarts](#graceful-restarts) section, Teleport
+supports graceful restarts. To upgrade a host to a newer Teleport version, an
+administrator must:
+
+1. Replace the Teleport binaries, usually [ `teleport` ](../cli-docs/#teleport)
+
+ and [ `tctl` ](../cli-docs/#tctl)
-1. Replace the Teleport binaries, usually `teleport` and `tctl`
2. Execute `systemctl restart teleport`
-This will perform a graceful restart, i.e. the Teleport daemon will fork a new
+This will perform a graceful restart, i.e. the Teleport daemon will fork a new
process to handle new incoming requests, leaving the old daemon process running
until existing clients disconnect.
## License File
-Commercial Teleport subscriptions require
-a valid license. The license file can be downloaded from the [Teleport Customer
+Commercial Teleport subscriptions require a valid license. The license file can
+be downloaded from the [Teleport Customer
Portal](https://dashboard.gravitational.com).
The Teleport license file contains a X.509 certificate and the corresponding
-private key in PEM format. Place the downloaded file on Auth servers and set
-the `license_file` configuration parameter of your `teleport.yaml` to point to
-the file location:
+private key in PEM format. Place the downloaded file on Auth servers and set the
+`license_file` configuration parameter of your `teleport.yaml` to point to the
+file location:
-```yaml
+``` yaml
auth_service:
license_file: /var/lib/teleport/license.pem
```
The `license_file` path can be either absolute or relative to the configured
-`data_dir`. If license file path is not set, Teleport will look for the
-`license.pem` file in the configured `data_dir`.
+`data_dir` . If license file path is not set, Teleport will look for the
+`license.pem` file in the configured `data_dir` .
!!! tip "NOTE":
- Only Auth servers require the license. Proxies and Nodes that do not also
- have Auth role enabled do not need the license.
+ Only Auth servers require the license. Proxies and Nodes that do
+ not also have Auth role enabled do not need the license.
## Troubleshooting
-To diagnose problems you can configure `teleport` to run with verbose logging enabled
-by passing it `-d` flag.
+To diagnose problems you can configure [ `teleport` ](../cli-docs/#teleport) to
+run with verbose logging enabled by passing it `-d` flag.
!!! tip "NOTE":
- It is not recommended to run Teleport in production with verbose logging
- as it generates a substantial amount of data.
+ It is not recommended to run Teleport in production with verbose
+ logging as it generates a substantial amount of data.
-Sometimes you may want to reset `teleport` to a clean state. This can be accomplished
-by erasing everything under `"data_dir"` directory. Assuming the default location,
-`rm -rf /var/lib/teleport/*` will do.
+Sometimes you may want to reset [ `teleport` ](../cli-docs/#teleport) to a clean
+state. This can be accomplished by erasing everything under `"data_dir"`
+directory. Assuming the default location, `rm -rf /var/lib/teleport/*` will do.
Teleport also supports HTTP endpoints for monitoring purposes. They are disabled
by default, but you can enable them:
-```yaml
+``` bash
$ teleport start --diag-addr=127.0.0.1:3000
```
Now you can see the monitoring information by visiting several endpoints:
-* `http://127.0.0.1:3000/metrics` is the list of internal metrics Teleport is tracking.
- It is compatible with [Prometheus](https://prometheus.io/) collectors.
-* `http://127.0.0.1:3000/healthz` returns "OK" if the process is healthy or `503` otherwise.
-* `http://127.0.0.1:3000/readyz` is similar to `/healthz`, but it returns "OK"
- _only after_ the node successfully joined the cluster, i.e. it draws the
+* `http://127.0.0.1:3000/metrics` is the list of internal metrics Teleport is
+
+ tracking. It is compatible with [Prometheus](https://prometheus.io/)
+ collectors.
+
+* `http://127.0.0.1:3000/healthz` returns "OK" if the process is healthy or
+
+`503` otherwise.
+
+* `http://127.0.0.1:3000/readyz` is similar to `/healthz` , but it returns "OK"
+
+ _only after_ the node successfully joined the cluster, i.e. it draws the
difference between "healthy" and "ready".
+
* `http://127.0.0.1:3000/debug/pprof/` is Golang's standard profiler. It's only
+
available when `-d` flag is given in addition to `--diag-addr`
## Getting Help
-Please open an [issue on Github](https://github.com/gravitational/teleport/issues).
-Alternatively, you can reach through the contact form on our [website](https://gravitational.com/).
+Please open an [issue on
+Github](https://github.com/gravitational/teleport/issues). Alternatively, you
+can reach through the contact form on our [website](https://gravitational.com/).
+
+For commercial support, custom features or to try our commercial edition,
+[Teleport Enterprise](/enterprise/), please reach out to us:
+`sales@gravitational.com` .
-For commercial support, custom features or to try our commercial edition, [Teleport Enterprise](/enterprise/),
-please reach out to us: `sales@gravitational.com`.
diff --git a/docs/4.1/architecture/auth.md b/docs/4.1/architecture/auth.md
new file mode 100644
index 0000000000000..7cd18acadd86b
--- /dev/null
+++ b/docs/4.1/architecture/auth.md
@@ -0,0 +1,290 @@
+# Teleport Auth
+
+This doc is about the Teleport Authentication Service and Certificate
+Management. It explains how Users and Nodes are identified and granted access to
+Nodes and Services.
+
+[TOC]
+
+## Authentication vs. Authorization
+
+Teleport Auth handles both authentication and authorization. These topics are
+related but different and they are often discussed jointly as "Auth".
+
+**Authentication** is proving an identity. "I say I am Bob, and I really am Bob.
+See look I have Bob's purple hat.". The job of an Authentication system is to
+define the criteria by which users must prove their identity. Is having a purple
+hat enough to show that a person is Bob? Maybe, maybe not. To identify users and
+nodes to Teleport Auth we require them to present a cryptographically-signed
+certificate issued by the Teleport Auth Certificate Authority.
+
+**Authorization** is proving access to something: "Bob has a purple hat, but
+also a debit card and the correct PIN code. Bob can access a bank account with
+the number 814000001344. Can Bob get $20 out of the ATM?". The ATM's
+Authentication system would validate Bob's PIN Code, while the Authorization
+system would use a stored mapping from Bob to Account 814000001344 to decide
+whether Bob could withdraw cash. Authorization defines and determines
+permissions that users have within a system, such as access to cash within a
+banking system or data in a filesystem. Before users are granted access to
+nodes, the Auth Service checks their identity against a stored mapping in a
+database.
+
+
+
+## SSH Certificates
+
+One can think of an SSH certificate as a "permit" issued and time-stamped by a
+trusted authority. In this case the authority is the Auth Server's Certificate
+Authority. A certificate contains four important pieces of data:
+
+1. List of principals (identities) this certificate belongs to.
+2. Signature of the certificate authority who issued it.
+3. The expiration date, also known as "time-to-live" or simply TTL.
+4. Additional data, such as the node role, stored as a certificate extension.
+
+## Authentication in Teleport
+
+Teleport uses SSH certificates to authenticate nodes and users within a cluster.
+
+There are two CAs operating inside the Auth Server because nodes and users each
+need their own certificates.
+
+* The **Node CA** issues certificates which identify a node (i.e. host, server,
+ computer). These certificates are used to add new nodes to a cluster and
+ identify connections coming from the node.
+* The **User CA** issues certificates which identify a User. These certificates
+ are used to authenticate users when they try to connect to a cluster node.
+
+### Issuing Node Certificates
+
+Node Certificates identify a node within a cluster and establish the permissions
+of the node to access to other Teleport services. The presence of a signed
+certificate on a node makes it a cluster member.
+
+
+
+1. To join a cluster for the first time, a node must present a "join token" to
+ the auth server. The token can be static (configured via config file) or a
+ dynamic, single-use token generated by [`tctl nodes
+ add`](../cli-docs/#tctl-nodes-add).
+
+ !!! tip "Token TTL":
+ When using dynamic tokens, their default time to live (TTL) is 15
+ minutes, but it can be reduced (not increased) via
+ [`tctl nodes add --ttl`](../cli-docs/#tctl-nodes-add) flag.
+
+2. When a new node joins the cluster, the auth server generates a new
+ public/private keypair for the node and signs its certificate. This node
+ certificate contains the node's role(s) (`proxy`, `auth` or `node`) as a
+ certificate extension (opaque signed string).
+
+### Using Node Certificates
+
+
+
+All nodes in a cluster can connect to the [Auth Server's API](#auth-api-server)
+ implemented as an HTTP REST service running over the SSH
+tunnel. This API connection is authenticated with the node certificate and the
+encoded role is checked to enforce access control. For example, a client
+connection using a certificate with only the `node` role won't be able to add
+and delete users. This client connection would only be authorized to get auth
+servers registered in the cluster.
+
+### Issuing User Certificates
+
+
+
+The Auth Server uses its User CA to issue user certificates. User certificates
+are stored on a user's machine in the `~/.tsh/` directory or also
+by the system's SSH agent if it is running.
+
+1. To get permission to join a cluster for the first time a user must provide
+ their username, password, and 2nd-factor token. Users can log in with [`tsh
+ login`](../cli-docs/#tsh-login) or via the Web UI. The Auth Server checks
+ these against its identity storage and checks the 2nd factor token.
+
+2. If the correct credentials were offered, the Auth Server will generate a
+ signed certificate and return it to the client. For users certificates are
+ stored in `~/.tsh` by default. If the client uses the [Web
+ UI](./proxy/#web-ui-to-ssh) the signed certificate is associated with a
+ secure websocket session.
+
+In addition to user's identity, user certificates also contain user roles and
+SSH options, like "permit-agent-forwarding".
+
+This additional data is stored as a certificate extension and is protected by
+the CA signature.
+
+### Using User Certificates
+
+
+
+When a client requests to access a node cluster, the Auth Server first checks
+that a certificate exists and hasn't expired. If it has expired, the client must
+re-authenticate with their username, password, and 2nd factor. If the
+certificate is still valid, the Auth Server validates the certificate's
+signature.
+
+If it is correct the client is granted access to the cluster. From here, the
+[Proxy Server](./proxy/#connecting-to-a-node) establishes a connection between
+client and node.
+
+## Certificate Rotation
+
+By default, all user certificates have an expiration date, also known as time to
+live (TTL). This TTL can be configured by a Teleport administrator. But the node
+certificates issued by an Auth Server are valid indefinitely by default.
+
+Teleport supports certificate rotation, i.e. the process of invalidating all
+previously-issued certificates for nodes _and_ users regardless of their TTL.
+Certificate rotation is triggered by [`tctl auth
+rotate`](../cli-docs/#tctl-auth). When this command is invoked by a Teleport
+administrator on one of cluster's Auth Servers, the following happens:
+
+1. A new certificate authority (CA) key is generated.
+2. The old CA will be considered valid _alongside_ the new CA for some period of
+ time. This period of time is called a _grace period_
+3. During the grace period, all previously issued certificates will be
+ considered valid, assuming their TTL isn't expired.
+4. After the grace period is over, the certificates issued by the old CA are no
+ longer accepted.
+
+This process is repeated twice: once for the node CA and once for the user CA.
+
+Take a look at the [Certificate Guide](../admin-guide/#certificate-rotation) to
+learn how to do certificate rotation in practice.
+
+## Auth API
+
+
+
+Clients can also connect to the auth API through the Teleport proxy to use a
+limited subset of the API to discover the member nodes of the cluster.
+
+## Auth State
+
+The Auth service maintains state using a database of users, credentials,
+certificates, and audit logs. The default storage location is
+`/var/lib/teleport` or an [admin-configured storage
+destination](../admin-guide/#high-availability).
+
+There are three types of data stored by the auth server:
+
+* **Cluster State** The auth server stores its own keys in a cluster state
+ storage. All of cluster dynamic configuration is stored there as well,
+ including:
+ * Node membership information and online/offline status for each node.
+ * List of active sessions.
+ * List of locally stored users
+ * RBAC configuration (roles and permissions).
+ * Other dynamic configuration.
+* **Audit Log** When users log into a Teleport cluster, execute remote commands
+ and logout, that activity is recorded in the audit log. See Audit Log for more
+ details. More on this in the [Audit Log section below](#audit-log).
+* **Recorded Sessions** When Teleport users launch remote shells via `tsh ssh`
+ command, their interactive sessions are recorded and stored by the auth
+ server. Each recorded session is a file which is saved in /var/lib/teleport by
+ default, but can also be saved in external storage, like an AWS S3 bucket.
+
+## Audit Log
+
+The Teleport auth server keeps the audit log of SSH-related events that take
+place on any node with a Teleport cluster. Each node in a cluster emits audit
+events and submit them to the auth server. The events recorded include:
+
+* successful user logins
+* node IP addresses
+* session time
+* session IDs
+
+!!! warning "Compatibility Warning":
+ Because all SSH events like `exec` or `session_start` are reported by the
+ Teleport node service, they will not be logged if you are using OpenSSH
+ `sshd` daemon on your nodes.
+
+Only an SSH server can report what's happening to the Teleport auth server.
+The audit log is a JSON file which is by default stored on the auth server's
+filesystem under `/var/lib/teleport/log`. The format of the file is documented
+in the [Admin Manual](admin-guide/#audit-log).
+
+Teleport users are encouraged to export the events into external, long term
+storage.
+
+!!! info "Deployment Considerations":
+ If multiple Teleport auth servers are used
+ to service the same cluster (HA mode) a network file system must be used for
+ `/var/lib/teleport/log` to allow them to combine all audit events into the
+ same audit log. [Learn how to deploy Teleport in HA Mode.](../admin-guide#high-availability)
+
+## Recording Proxy Mode
+
+In this mode, the proxy terminates (decrypts) the SSH connection using the
+certificate supplied by the client via SSH agent forwarding and then establishes
+its own SSH connection to the final destination server, effectively becoming an
+authorized "man in the middle". This allows the proxy server to forward SSH
+session data to the auth server to be recorded, as shown below:
+
+
+
+The recording proxy mode, although _less secure_, was added to allow Teleport
+users to enable session recording for OpenSSH's servers running `sshd`, which is
+helpful when gradually transitioning large server fleets to Teleport.
+
+We consider the "recording proxy mode" to be less secure for two reasons:
+
+1. It grants additional privileges to the Teleport proxy. In the default mode,
+ the proxy stores no secrets and cannot "see" the decrypted data. This makes a
+ proxy less critical to the security of the overall cluster. But if an
+ attacker gains physical access to a proxy node running in the "recording"
+ mode, they will be able to see the decrypted traffic and client keys stored
+ in proxy's process memory.
+2. Recording proxy mode requires the SSH agent forwarding. Agent forwarding is
+ required because without it, a proxy will not be able to establish the 2nd
+ connection to the destination node.
+
+However, there are advantages of proxy-based session recording too. When
+sessions are recorded at the nodes, a root user can add iptables rules to
+prevent sessions logs from reaching the Auth Server. With sessions recorded at
+the proxy, users with root privileges on nodes have no way of disabling the
+audit.
+
+See the [admin guide](../admin-guide#recorded-sessions) to learn how to turn on the
+recording proxy mode.
+
+## Storage Back-Ends
+
+Different types of cluster data can be configured with different storage
+back-ends as shown in the table below:
+
+Data Type | Supported Back-ends | Notes
+-----------------|---------------------------|---------
+Cluster state | `dir`, `etcd`, `dynamodb` | Multi-server (HA) configuration is only supported using `etcd` and `dynamodb` back-ends.
+Audit Log Events | `dir`, `dynamodb` | If `dynamodb` is used for the audit log events, `s3` back-end **must** be used for the recorded sessions.
+Recorded Sessions| `dir`, `s3` | `s3` is mandatory if `dynamodb` is used for the audit log.
+
+!!! tip "Note":
+ The reason Teleport designers split the audit log events and the recorded
+ sessions into different back-ends is because of the nature of the data. A
+ recorded session is a compressed binary stream (blob) while the event is a
+ well-defined JSON structure. `dir` works well enough for both in small
+ deployments, but large clusters require specialized data stores: S3 is
+ perfect for uploading session blobs, while DynamoDB or `etcd` are better
+ suited to store the cluster state.
+
+The combination of DynamoDB + S3 is especially popular among AWS users because
+it allows them to run Teleport clusters completely devoid of local state.
+
+!!! tip "NOTE":
+ For high availability in production, a Teleport cluster can be
+ serviced by multiple auth servers running in sync. Check [HA
+ configuration](admin-guide.md#high-availability) in the Admin Guide.
+
+
+## More Concepts
+
+* [Architecture Overview](./architecture)
+* [Teleport Users](./users)
+* [Teleport Nodes](./nodes)
+* [Teleport Proxy](./proxy)
diff --git a/docs/4.1/architecture/nodes.md b/docs/4.1/architecture/nodes.md
new file mode 100644
index 0000000000000..9820324364179
--- /dev/null
+++ b/docs/4.1/architecture/nodes.md
@@ -0,0 +1,124 @@
+## Teleport Nodes
+
+[TOC]
+
+## The Node Service
+
+A regular node becomes a Teleport Node when the node joins a cluster with a
+"join" token. Read about how nodes are issued certificates in the
+[Auth Guide](./auth/#issuing-node-certificates).
+
+
+
+A Teleport Node runs the [`teleport`](../cli-docs/#teleport) daemon with the
+`node` role. This process handles incoming connection requests, authentication,
+and remote command execution on the node, similar to the function of OpenSSH's
+`sshd`.
+
+
+
+All cluster Nodes keep the Auth Server updated with their status with periodic
+ping messages. They report their IP addresses and values of their assigned
+labels. Nodes can access the list of all Nodes in their cluster via the
+[Auth Server API](./auth/#auth-api).
+
+!!! tip "Tip"
+ In most environments we advise replacing the OpenSSH daemon `sshd`
+ with the Teleport Node Service unless there are existing workflows relying
+ on `ssh` or in special cases such as embedded devices that can't run
+ custom binaries.
+
+The `node` service provides SSH access to every node with all of the following clients:
+
+* [OpenSSH: `ssh`](../admin-guide/#using-teleport-with-openssh)
+* [Teleport CLI client: `tsh ssh`](../cli-docs/#tsh-ssh)
+* [Teleport Proxy UI](./proxy/#web-to-ssh-proxy) accessed via a web browser.
+
+Each client is authenticated via the [Auth Service](./auth/#authentication-in-teleport) before being granted access to a Node.
+
+## Node Identity on a Cluster
+
+Node Identity is defined on the Cluster level by the certificate a node possesses.
+
+
+
+This certificate contains information about the node including:
+
+* The **host ID**, a generated UUID unique to a node
+* A **nodename**, which defaults to `hostname` of the node, but can be configured.
+* The **cluster_name**, which defaults to the `hostname` of the auth server, but can be configured
+* The node **role** (i.e. `node,proxy`) encoded as a certificate extension
+* The cert **TTL** (time-to-live)
+
+A Teleport Cluster is a set of one or more machines whose public keys are signed
+by the same certificate authority (CA) operating in the Auth Server. A
+certificate is issued to a node when it joins the cluster for the first time.
+Learn more about this process in the [Auth
+Guide](./auth/#authentication-in-teleport).
+
+!!! warning "Single-Node Clusters are Clusters"
+ Once a Node gets a signed certificate from the Node CA, the Node is considered a member of the cluster, even if that cluster has only one node.
+
+## Connecting to Nodes
+
+When a client requests access to a Node, authentication is always performed
+through a cluster proxy. When the proxy server receives a connection request
+from a client it validates the client's credentials with the Auth Service. Once
+the client is authenticated the proxy attempts to connect the client to the
+requested Node.
+
+There is a detailed walk-through of the steps needed to initiate a connection to
+a node in the [Architecture Overview](./architecture).
+
+
+
+## Cluster State
+
+Cluster state is stored in a central storage location configured by the Auth
+Server. This means that each node is completely stateless and holds no secrets
+such as keys or passwords.
+
+
+
+The cluster state information stored includes:
+
+* Node membership information and online/offline status for each node.
+* List of active sessions.
+* List of locally stored users.
+* RBAC configuration (roles and permissions).
+* Dynamic configuration.
+
+Read more about what is stored in the [Auth Guide](./auth/#auth-state)
+
+## Session Recording
+
+By default, nodes submit SSH session traffic to the Auth server
+for storage. These recorded sessions can be replayed later via `tsh play`
+command or in a web browser.
+
+Some Teleport users mistakenly believe that audit and session recording happen
+by default on the Teleport proxy server. This is not the case because a proxy
+cannot see the encrypted traffic, it is encrypted end-to-end, i.e. from an SSH
+client to an SSH server/node, see the diagram below:
+
+
+
+However, starting from Teleport 2.4, it is possible to configure the
+Teleport proxy to enable "recording proxy mode".
+
+## Trusted Clusters
+
+Teleport Auth Service can allow 3rd party users or nodes to connect to cluster
+nodes if their public keys are signed by a trusted CA. A "trusted cluster" is a
+pair of public keys of the trusted CA. It can be configured via `teleport.yaml`
+file.
+
+
+
+## More Concepts
+
+* [Architecture](./architecture)
+* [Teleport Users](./users)
+* [Teleport Auth](./auth)
+* [Teleport Proxy](./proxy)
+
diff --git a/docs/4.1/architecture/overview.md b/docs/4.1/architecture/overview.md
new file mode 100644
index 0000000000000..7072c30b5e059
--- /dev/null
+++ b/docs/4.1/architecture/overview.md
@@ -0,0 +1,246 @@
+## Architecture Introduction
+
+This guide is for those looking for a deeper understanding of Teleport. If you
+are looking for hands-on instructions on how to set up Teleport for your team,
+check out the [Admin Guide](../admin-guide)
+
+[TOC]
+
+## Design Principles
+
+Teleport was designed in accordance with the following principles:
+
+* **Off the Shelf Security**: Teleport does not re-implement any security
+ primitives and uses well-established, popular implementations of the
+ encryption and network protocols.
+
+* **Open Standards**: There is no security through obscurity. Teleport is fully
+ compatible with existing and open standards and other software, including
+ [OpenSSH](../admin-guide/#using-teleport-with-openssh).
+
+* **Cluster-Oriented Design**: Teleport is built for managing clusters, not
+ individual servers. In practice this means that hosts and [Users](./users)
+ have cluster memberships. Identity management and authorization happen on a
+ cluster level.
+
+* **Built for Teams**: Teleport was created under the assumption of multiple
+ teams operating on several disconnected clusters. Example use cases might be
+ production-vs-staging environment, or a cluster-per-customer or
+ cluster-per-application basis.
+
+This doc introduces the basic concepts of Teleport so you can get started
+managing access!
+
+## Definitions
+
+Here are definitions of the key concepts you will use in teleport.
+
+|Concept | Description
+|------------------|------------
+| Node | A node is a "server", "host" or "computer". Users can create shell sessions to access nodes remotely.
+| User | A user represents someone (a person) or something (a machine) who can perform a set of operations on a node.
+| Cluster | A cluster is a group of nodes that work together and can be considered a single system. Cluster nodes can create connections to each other, often over a private network. Cluster nodes often require TLS authentication to ensure that communication between nodes remains secure and comes from a trusted source.
+| Certificate Authority (CA) | A Certificate Authority issues SSL certificates in the form of public/private keypairs.
+| [Teleport Node](./nodes) | A Teleport Node is a regular node that is running the Teleport Node service. Teleport Nodes can be accessed by authorized Teleport Users. A Teleport Node is always considered a member of a Teleport Cluster, even if it's a single-node cluster.
+| [Teleport User](./users) | A Teleport User represents someone who needs access to a Teleport Cluster. Users have stored usernames and passwords, and are mapped to OS users on each node. User data is stored locally or in an external store.
+| Teleport Cluster | A Teleport Cluster is comprised of one or more nodes, each of which hold public keys signed by the same [Auth Server CA](./auth). The CA cryptographically signs the public key of a node, establishing cluster membership.
+| [Teleport CA](./auth) | Teleport operates two internal CAs as a function of the Auth service. One is used to sign User public keys and the other signs Node public keys. Each certificate is used to prove identity, cluster membership and manage access.
+
+## Teleport Services
+
+Teleport uses three services which work together: [Nodes](./nodes),
+[Auth](./auth), and [Proxy](./proxy).
+
+[**Teleport Nodes**](./nodes) are servers which can be accessed remotely with
+SSH. The Teleport Node service runs on a machine and is similar to the `sshd`
+daemon you may be familiar with. Users can log in to a Teleport Node with all
+of the following clients:
+
+* [OpenSSH: `ssh` ](../admin-guide/#using-teleport-with-openssh)
+* [Teleport CLI client: `tsh ssh` ](../cli-docs/#tsh-ssh)
+* [Teleport Proxy UI](./proxy/#web-to-ssh-proxy) accessed via a web browser.
+
+[**Teleport Auth**](./auth) authenticates Users and Nodes, authorizes User
+access to Nodes, and acts as a CA by signing certificates issued to Users and
+Nodes.
+
+[**Teleport Proxy**](./proxy) forwards User credentials to the [Auth
+Service](../auth), creates connections to a requested Node after successful
+authentication, and serves a [Web UI](./proxy/#web-to-ssh-proxy).
+
+## Basic Architecture Overview
+
+The numbers correspond to the steps needed to connect a client to a node. These
+steps are explained below the diagram. Read the
+[Architecture Walkthrough](./architecture/#architecture-walkthrough)
+for a detailed view into these connection steps.
+
+!!! warning "Caution"
+ The teleport daemon calls services "roles" in the CLI
+ client. The `--roles` flag has no relationship to concept of User Roles or
+ permissions.
+
+
+
+1. Initiate Client Connection
+2. Authenticate Client
+3. Connect to Node
+4. Authorize Client Access to Node
+
+!!! tip "Tip"
+ In the diagram above we show each Teleport service separately for
+ clarity, but Teleport services do not have to run on separate nodes.
+ Teleport can be run as a binary on a single-node cluster with no external
+ storage backend. We demonstrate this minimal setup in the [Quickstart
+ Guide](../guides/quickstart).
+
+## Detailed Architecture Overview
+
+Here is a detailed diagram of a Teleport Cluster.
+
+The numbers correspond to the steps needed to connect a client to a node. These
+steps are explained in detail below the diagram.
+
+
+
+!!! note "Caution"
+ The Teleport Admin tool, `tctl` , must be physically present
+ on the same machine where Teleport Auth is running. Adding new nodes or
+ inviting new users to the cluster is only possible using this tool.
+
+### 1: Initiate Client Connection
+
+
+
+The client tries to establish an SSH connection to a proxy using the CLI
+interface or a web browser. When establishing a connection, the client offers
+its public key. Clients must always connect through a proxy for two reasons:
+
+1. Individual nodes may not always be reachable from outside a secure network.
+2. Proxies always record SSH sessions and keep track of active user sessions.
+
+ This makes it possible for an SSH user to see if someone else is connected to
+ a node she is about to work on.
+
+### 2: Authenticate Client Certificate
+
+
+
+The proxy checks if the submitted certificate has been previously signed by the
+auth server.
+
+
+
+If there was no key previously offered (first time login) or if the certificate
+has expired, the proxy denies the connection and asks the client to login
+interactively using a password and a 2nd factor if enabled.
+
+Teleport supports
+[Google Authenticator](https://support.google.com/accounts/answer/1066447?hl=en),
+[Authy](https://www.authy.com/), or another
+[TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_algorithm)
+generator. The password + 2nd factor are submitted to a proxy via HTTPS,
+therefore it is critical for a secure configuration of Teleport to install a
+proper HTTPS certificate on a proxy.
+
+!!! warning "Warning"
+ Do not use self-signed SSL/HTTPS certificates in production!
+
+If the credentials are correct, the auth server generates and signs a new
+certificate and returns it to a client via the proxy. The client stores this key
+and will use it for subsequent logins. The key will automatically expire after
+12 hours by default. This TTL can be [configured](../cli-docs/#tctl-users-add)
+to another value by the cluster administrator.
+
+### 3: Lookup Node
+
+
+
+At this step, the proxy tries to locate the requested node in a cluster. There
+are three lookup mechanisms a proxy uses to find the node's IP address:
+
+1. Use DNS to resolve the name requested by the client.
+2. Asks the Auth Server if there is a Node registered with this `nodename` .
+3. Asks the Auth Server to find a node (or nodes) with a label that matches the
+ requested name.
+
+If the node is located, the proxy establishes the connection between the client
+and the requested node. The destination node then begins recording the session,
+sending the session history to the auth server to be stored.
+
+!!! note "Note"
+ Teleport may also be configured to have the session recording
+ occur on the proxy, see [Audit Log](../architecture/#audit-log) for more
+ information.
+
+### 4: Authenticate Node Certificate
+
+
+
+When the node receives a connection request, it checks with the Auth Server to
+validate the node's public key certificate and validate the Node's cluster
+membership.
+
+If the node certificate is valid the node is allowed to access the Auth Server
+API which provides access to information about nodes and users in the cluster.
+
+### 5: Grant User Node Access
+
+
+
+The node requests the Auth Server to provide a list of [OS users (user
+mappings)](./users) for the connecting client, to make sure the client is
+authorized to use the requested OS login.
+
+Finally the client is authorized to create an SSH connection to a node.
+
+
+
+## Teleport CLI Tools
+
+Teleport offers two command line tools. `tsh` is a client tool used by the end
+users, while `tctl` is used for cluster administration.
+
+### TSH
+
+`tsh` is similar in nature to OpenSSH `ssh` or `scp`. In fact, it has
+subcommands named after them so you can call:
+
+```bsh
+$ tsh --proxy=p ssh -p 1522 user@host
+$ tsh --proxy=p scp -P 1522 example.txt user@host:/destination/dir
+```
+
+Unlike `ssh`, `tsh` is very opinionated about authentication: it always uses
+auto-expiring keys and it always connects to Teleport nodes via a proxy.
+
+When `tsh` logs in, the auto-expiring key is stored in `~/.tsh` and is valid for
+12 hours by default, unless you specify another interval via `--ttl` flag
+(capped by the server-side configuration).
+
+You can learn more about `tsh` in the [User Manual](user-manual.md).
+
+### TCTL
+
+`tctl` is used to administer a Teleport cluster. It connects to the Auth
+server listening on `127.0.0.1` and allows a cluster administrator to manage
+nodes and users in the cluster.
+
+`tctl` is also a tool which can be used to modify the dynamic configuration of
+the cluster, like creating new user roles or connecting trusted clusters.
+
+You can learn more about `tctl` in the [Admin Manual](admin-guide.md).
+
+
+## Next Steps
+
+* If you haven't already, read the [Quickstart Guide](../quickstart) to run a
+minimal setup of Teleport yourself.
+* Set up Teleport for your team with the [Admin Guide](../admin-guide)
+
+Read the rest of the Architecture Guides:
+
+* [Teleport Users](./users)
+* [Teleport Nodes](./nodes)
+* [Teleport Auth](./auth)
+* [Teleport Proxy](./proxy)
diff --git a/docs/4.1/architecture/proxy.md b/docs/4.1/architecture/proxy.md
new file mode 100644
index 0000000000000..986a8adc9ee19
--- /dev/null
+++ b/docs/4.1/architecture/proxy.md
@@ -0,0 +1,98 @@
+## The Proxy Service
+
+[TOC]
+
+The proxy is a stateless service which performs three main functions in a
+Teleport cluster:
+
+1. It serves as an authentication gateway. It asks for credentials from
+ connecting clients and forwards them to the Auth server via [Auth
+ API](./auth/#auth-api).
+
+2. It looks up the IP address for a requested Node and then proxies a connection
+ from client to Node.
+
+3. It serves a Web UI which is used by cluster users to sign up and configure
+ their accounts, explore nodes in a cluster, log into remote nodes, join
+ existing SSH sessions or replay recorded sessions.
+
+## Connecting to a Node
+
+### Web to SSH Proxy
+
+In this mode, Teleport Proxy implements WSS - secure web sockets - to proxy a
+client SSH connection:
+
+
+
+1. User logs in to Web UI using username and password, and 2nd factor token if
+ configured (2FA Tokens are not used with SSO providers).
+2. Proxy passes credentials to the Auth Server's API
+3. If Auth Server accepts credentials, it generates a new web session and
+ generates a special ssh keypair associated with this web session. Auth server
+ starts serving [OpenSSH ssh-agent
+ protocol](https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.agent)
+ to the proxy.
+4. The User obtains an SSH session in the Web UI and can interact with the node
+ on a web-based terminal. From the Node's perspective, it's a regular SSH
+ client connection that is authenticated using an OpenSSH certificate, so no
+ special logic is needed.
+
+!!! note "SSL Encryption"
+    When using the web UI, the Teleport Proxy terminates SSL traffic and re-encodes data for the SSH client connection.
+
+### CLI to SSH Proxy
+
+**Getting Client Certificates**
+
+Teleport Proxy implements a special method to let clients get short-lived
+authentication certificates signed by the [Auth Service User Certificate
+Authority (CA)](./auth/#authentication-in-teleport):
+
+
+
+1. A [`tsh` client](../cli-docs/#tsh) generates an OpenSSH keypair. It forwards
+ the generated public key, username, password and second factor token to the
+ proxy.
+2. The Proxy Server forwards request to the Auth Server.
+3. If Auth Server accepts credentials, it generates a new certificate signed by
+ its user CA and sends it back to the Proxy Server. The certificate has a TTL
+ which defaults to 24 hours, but can be configured in
+ [`tctl`](../cli-docs/#tctl).
+4. The Proxy Server returns the user certificate to the client and client stores
+ it in `~/.tsh/keys`. The certificate is also added to the local SSH agent if
+ one is running.
+
+**Using Client Certificates**
+
+Once the client has obtained a certificate, it can use it to authenticate with
+any Node in the cluster. Users can use the certificate using a standard OpenSSH
+client `ssh` or using `tsh`:
+
+
+
+1. A client connects to the Proxy Server and provides target node's host and
+ port location. There are three lookup mechanisms a proxy uses to find the
+ node's IP address:
+
+ * Use DNS to resolve the name requested by the client.
+ * Asks the Auth Server if there is a Node registered with this `nodename`.
+ * Asks the Auth Server to find a node (or nodes) with a label that matches
+ the requested name.
+
+2. If the node is located, the Proxy establishes an SSH connection to the
+ requested node and starts forwarding traffic from Node to client.
+3. The client uses the established SSH tunnel from Proxy to Node to open a new
+ SSH connection. The client authenticates with the target Node using its
+ client certificate.
+
+!!! tip "NOTE"
+    Teleport's proxy command makes it compatible with [SSH jump hosts](https://wiki.gentoo.org/wiki/SSH_jump_host) implemented
+    using OpenSSH's `ProxyCommand`. Teleport also supports OpenSSH's `ProxyJump`/`ssh -J` implementation as of Teleport 4.1.
+
+## More Concepts
+
+* [Architecture Overview](./architecture)
+* [Teleport Users](./users)
+* [Teleport Auth](./auth)
+* [Teleport Nodes](./nodes)
diff --git a/docs/4.1/architecture/users.md b/docs/4.1/architecture/users.md
new file mode 100644
index 0000000000000..9a09e289ce365
--- /dev/null
+++ b/docs/4.1/architecture/users.md
@@ -0,0 +1,123 @@
+## Teleport Users
+
+
+
+[TOC]
+
+## Types of Users
+
+Unlike traditional SSH, Teleport introduces the concept of a User Account. A
+User Account is not the same as SSH login. Instead each Teleport User is
+associated with another account which is used to authenticate the user.
+
+For community edition users, these will be OS users which are administered
+outside of Teleport on each cluster node. For example, there can be a Teleport
+user "joe" who can be given permission to login as "root" to a specific subset
+of nodes. Another user "juliet" could be given permission to OS users "root" and
+to "nginx". Teleport does not have knowledge of the OS Users so it expects both
+"root" and "nginx" to exist on the node.
+
+For enterprise edition users, these can be stored in external identity
+sources such as OKTA, Active Directory, OneLogin, G Suite, or OIDC. Read the
+[Enterprise Guide](../enterprise) to learn more.
+
+Teleport supports two types of user accounts: **Local Users** and
+**External Users**.
+
+### Local users
+
+Local users are created and stored in Teleport's own identity storage in the
+Auth Server.
+
+Let's look at this table:
+
+|Teleport User | Allowed OS Logins | Description
+|------------------|---------------|-----------------------------
+|joe | joe, root | Teleport user 'joe' can login into member nodes as OS user 'joe' or 'root'
+|juliet | juliet | Teleport user 'juliet' can login into member nodes only as OS user 'juliet'
+|ross | | If no OS login is specified, it defaults to the same name as the Teleport user, here this is "ross".
+
+To add a new user to Teleport, you have to use the `tctl` tool on the same node
+where the auth server is running, i.e. `teleport` was started with
+`--roles=auth` .
+
+A cluster administrator must create account entries for every Teleport user with
+[ `tctl users add` ](../cli-docs). Every Teleport User must be associated with a
+list of one or more machine-level OS usernames it can authenticate as during a
+login. This list is called "user mappings".
+
+
+
+The diagram shows the following mappings. A couple of noteworthy things
+from this example:
+
+* Teleport User `sandra` does **not** have access to `grav-02`
+through Teleport because `sandra` is not an OS username.
+* Teleport User `joe` has access to all nodes because the OS user `root`
+is present on all nodes.
+
+| Teleport User | logins | has access to nodes
+| -------- | --- | ---- |
+joe | root, joe | grav-00, grav-01, grav-02
+tara | tara | grav-01, grav-02
+teleport | teleport | grav-00, grav-02
+sandra | ops | grav-00, grav-02
+
+Teleport supports second factor authentication (2FA) when using a local auth
+connector and it is enforced by default.
+
+!!! info "2FA Support"
+ 2FA is not supported with SSO providers such as Github or OKTA. To learn
+ more about SSO configuration check out the [SSO section of the Enterprise
+ Guide](../enterprise/#sso)
+
+ There are two types of 2FA supported:
+
+* [TOTP](https://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm)
+
+ is the default. You can use [Google
+ Authenticator](https://en.wikipedia.org/wiki/Google_Authenticator) or
+ [Authy](https://www.authy.com/) or any other TOTP client.
+
+* [U2F](https://en.wikipedia.org/wiki/Universal_2nd_Factor).
+
+### External users
+
+
+
+External users are users stored elsewhere within an organization. Examples
+include Github, Active Directory (AD), OIDC, or any identity store with an
+OpenID/OAuth2 or SAML endpoint.
+
+!!! tip "Version Warning"
+ External user storage is only supported in Teleport
+ Enterprise. Please take a look at the [Teleport
+ Enterprise](../enterprise.md) chapter for more information.
+
+#### Multiple Identity Sources
+
+It is possible to have multiple identity sources configured for a Teleport
+cluster. In this case, an identity source (called a "connector") will have to be
+passed to [ `tsh --auth=connector_name login` ](../cli-docs/#tsh-login).
+
+
+
+The local users connector can be specified via [`tsh --auth=local
+login`](../cli-docs/#tsh-login).
+
+## User Roles
+
+Unlike traditional SSH, each Teleport user account is assigned a `role` . Having
+roles allows Teleport to implement role-based access control (RBAC), i.e. assign
+users to groups (roles) and restrict each role to a subset of actions on a
+subset of nodes in a cluster.
+
+
+
+## More Concepts
+
+* [Architecture Overview](./architecture)
+* [Teleport Auth](./auth)
+* [Teleport Nodes](./nodes)
+* [Teleport Proxy](./proxy)
+
diff --git a/docs/4.1/aws_oss_guide.md b/docs/4.1/aws_oss_guide.md
index 46e315952d4cf..8c109d2e1c566 100644
--- a/docs/4.1/aws_oss_guide.md
+++ b/docs/4.1/aws_oss_guide.md
@@ -1,8 +1,8 @@
# Running Teleport on AWS
We've created this guide to give customers a high level overview of how to use Teleport
-on Amazon Web Services (AWS). This guide provides a high level introduction leading to
-a deep dive into how to setup and run Teleport in production.
+on Amazon Web Services (AWS). This guide provides a high level introduction leading to
+a deep dive into how to setup and run Teleport in production.
We have split this guide into:
@@ -15,47 +15,47 @@ We have split this guide into:
**Why would you want to use Teleport with AWS?**
At some point you'll want to log into the system using SSH
-to help test, debug and troubleshoot a problem box. For EC2, AWS recommends creating
-['Key Pairs'](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
+to help test, debug and troubleshoot a problem box. For EC2, AWS recommends creating
+['Key Pairs'](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html)
and has a range of [other tips for securing EC2 instances](https://aws.amazon.com/articles/tips-for-securing-your-ec2-instance/).
This approach has a number of limitations:
-1. As your organization grows, keeping track of end users' public/private keys becomes
+1. As your organization grows, keeping track of end users' public/private keys becomes
an administrative nightmare.
-2. Using SSH public/private keys has a number of limitations. Read why [SSH Certificates are better](https://gravitational.com/blog/ssh-key-management/).
-3. Once a machine has been bootstrapped with SSH Keys, there isn't an easy way to
- add new keys and delegate access.
+2. Using SSH public/private keys has a number of limitations. Read why [SSH Certificates are better](https://gravitational.com/blog/ssh-key-management/).
+3. Once a machine has been bootstrapped with SSH Keys, there isn't an easy way to
+ add new keys and delegate access.
**Which Services can I use Teleport with?**
You can use Teleport for all the services that you would SSH into. This guide is focused
on EC2. We have a short blog post on using Teleport with [EKS](https://gravitational.com/blog/teleport-aws-eks/). We plan to expand the guide based on feedback but will plan to add instructions
-for the below.
+for the below.
- RDS
- Detailed EKS
- Lightsail
- Fargate
-- AWS ECS
+- AWS ECS
## Teleport Introduction
-This guide will cover how to setup, configure and run Teleport on [AWS](https://aws.amazon.com/).
-
-#### AWS Services required to run Teleport in HA
+This guide will cover how to setup, configure and run Teleport on [AWS](https://aws.amazon.com/).
+
+#### AWS Services required to run Teleport in HA
- [EC2 / Autoscale](#ec2-autoscale)
- [DynamoDB](#dynamodb)
- [S3](#s3)
-- [Route53](#route53)
+- [Route53](#route53)
- [NLB](#nlb-network-load-balancer)
- [IAM](#iam)
- [ACM](#acm)
-- [SSM](#aws-systems-manager-parameter-store)
+- [SSM](#aws-systems-manager-parameter-store)
-We recommend setting up Teleport in high availability mode (HA). In HA mode DynamoDB
-stores the state of the system and S3 will store audit logs.
+We recommend setting up Teleport in high availability mode (HA). In HA mode DynamoDB
+stores the state of the system and S3 will store audit logs.

@@ -63,24 +63,25 @@ stores the state of the system and S3 will store audit logs.
To run Teleport in a HA configuration we recommend using m4.large instances. It's best practice to separate the proxy and authentication server, using autoscaling groups for both machines. We have pre-built AMIs for both Teleport OSS and Enterprise editions. Instructions on using these [AMIs are below](#single-oss-teleport-amis-manual-gui-setup).
### DynamoDB
-DynamoDB is a key-value and document database that delivers single-digit millisecond
-performance at any scale. For large clusters you can provision usage but for smaller
-deployments you can leverage DynamoDB's autoscaling.
+DynamoDB is a key-value and document database that delivers single-digit millisecond
+performance at any scale. For large clusters you can provision usage but for smaller
+deployments you can leverage DynamoDB's autoscaling.
Teleport 4.0 leverages [DynamoDB's streaming feature](
https://github.com/gravitational/teleport/issues/2430). When turning this on, you'll need
-to specify `New Image` from the streaming options. DynamoDB back-end supports two
+to specify `New Image` from the streaming options. DynamoDB back-end supports two
types of Teleport data:
* Cluster state
-* Audit log events
+
+See [DynamoDB Admin Guide for more information](https://gravitational.com/teleport/docs/admin-guide/#using-dynamodb)

-
+
Setting Stream to `NEW IMAGE`
-For maintainability and ease of use, we recommend following our [Terraform example](https://github.com/gravitational/teleport/blob/master/examples/aws/terraform/dynamo.tf)
-but below are high level definitions for the tables required to run Teleport.
+For maintainability and ease of use, we recommend following our [Terraform example](https://github.com/gravitational/teleport/blob/master/examples/aws/terraform/dynamo.tf)
+but below are high level definitions for the tables required to run Teleport.
Cluster State:
@@ -89,60 +90,115 @@ Cluster State:
| Primary partition key | HashKey (String) |
| Primary sort key | FullPath (String) |
-Audit Log:
-
-| Table name | teleport-cluster-name-events |
-|-----------------------|--------------------------------|
-| Primary partition key | SessionID (String) |
-| Primary sort key | EventIndex (Number) |
-### S3
+### S3
Amazon Simple Storage Service (Amazon S3) is an object storage service that offers
-industry-leading scalability, data availability, security, and performance. In this
+industry-leading scalability, data availability, security, and performance. In this
Teleport setup, S3 will provide storage for recorded sessions.
-
-We recommend using Amazon S3 Standard.
+
+We recommend using Amazon S3 Standard.
!!! tip "Tip":
S3 provides [Amazon S3 Object Lock](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html),
- which is useful for customers deploying Teleport in regulated environments.
+ which is useful for customers deploying Teleport in regulated environments.
### Route53
-Route53 is a highly available Domain Name System (DNS) provided by AWS. It'll be
+Route53 is a highly available Domain Name System (DNS) provided by AWS. It'll be
needed to setup a URL for the proxy - we recommend using a subdomain.
e.g. `teleport.acmeinc.com`
### NLB: Network Load Balancer
-AWS provides many different load balancers. To setup Teleport, we recommend
-using a Network Load Balancer. Network Load Balancers provides TLS for the Teleport
-proxy and provides the TCP connections needed for Teleport proxy SSH connections.
+AWS provides many different load balancers. To set up Teleport, we recommend
+using a Network Load Balancer. Network Load Balancers provide TLS for the Teleport
+proxy and provide the TCP connections needed for Teleport proxy SSH connections.
### IAM
-IAM is the recommended tool for creating service access. This guide will follow the
-best practice of principle of least privilege (PoLP).
+IAM is the recommended tool for creating service access. This guide will follow the
+best practice of principle of least privilege (PoLP).
+
+#### IAM for Amazon S3
+
+In order to grant an IAM user in your AWS account access to one of your buckets, `example.s3.bucket`, you will need to grant the following permissions: `s3:ListBucket`, `s3:ListBucketVersions`, `s3:PutObject`, `s3:GetObject`, `s3:GetObjectVersion`
+
+An example policy is shown below:
+
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:ListBucket",
+ "s3:ListBucketVersions"
+ ],
+ "Resource": ["arn:aws:s3:::example.s3.bucket"]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "s3:PutObject",
+ "s3:GetObject",
+ "s3:GetObjectVersion"
+ ],
+ "Resource": ["arn:aws:s3:::example.s3.bucket/*"]
+ }
+ ]
+ }
+```
+!!! note "Note":
+ `example.s3.bucket` will need to be replaced with your bucket name.
+
+#### IAM for DynamoDB
+
+In order to grant an IAM user access to DynamoDB, make sure that the IAM role assigned to Teleport is configured with proper permissions.
+
+An example policy is shown below:
+
+```
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "AllAPIActionsOnTeleportAuth",
+ "Effect": "Allow",
+ "Action": "dynamodb:*",
+ "Resource": "arn:aws:dynamodb:eu-west-1:123456789012:table/prod.teleport.auth"
+ },
+ {
+ "Sid": "AllAPIActionsOnTeleportStreams",
+ "Effect": "Allow",
+ "Action": "dynamodb:*",
+ "Resource": "arn:aws:dynamodb:eu-west-1:123456789012:table/prod.teleport.auth/stream/*"
+ }
+ ]
+}
+```
+!!! note "Note":
+    `eu-west-1:123456789012:table/prod.teleport.auth` will need to be replaced with the region, account ID, and table name of your DynamoDB table.
-### ACM
+### ACM
With AWS Certificate Manager, you can quickly request SSL/TLS certificates.
- TLS Cert: Used to provide SSL for the proxy.
- SSH Certs (not in ACM): Created and self signed by the `authentication server` and are used to
- delegate access to Teleport nodes.
+ delegate access to Teleport nodes.
-### AWS Systems Manager Parameter Store
-To add new nodes to a Teleport Cluster, we recommend using a [strong static token](https://gravitational.com/teleport/docs/admin-guide/#example-configuration). SSM can be also used to store the
-enterprise licence.
+### AWS Systems Manager Parameter Store
+To add new nodes to a Teleport Cluster, we recommend using a [strong static token](https://gravitational.com/teleport/docs/admin-guide/#example-configuration). SSM can also be used to store the
+enterprise license.
## Setting up a HA Teleport Cluster
-Teleport's config based setup offers a wide range of customization for customers.
+Teleport's config based setup offers a wide range of customization for customers.
This guide offers a range of setup options for AWS. If you have a very large account,
multiple accounts, or over 10k users we would recommend getting in touch. We are
more than happy to help you architect, setup and deploy Teleport into your environment.
-We have these options for you.
+We have these options for you.
- [Using AWS Marketplace (Manual Setup)](#single-oss-teleport-amis-manual-gui-setup)
- [Deploying with CloudFormation](#deploying-with-cloudformation)
@@ -151,27 +207,27 @@ We have these options for you.
### Single OSS Teleport AMIs (Manual / GUI Setup)
This guide provides instructions on deploying Teleport using AMIs, the below instructions
-are designed for using the AMI and GUI. It doesn't setup Teleport in HA, so we recommend
-this as a starting point, but then look at the more advanced sections.
+are designed for using the AMI and GUI. It doesn't set up Teleport in HA, so we recommend
+this as a starting point, but then look at the more advanced sections.
-### Prerequisites
+### Prerequisites
- Obtain a SSL/TLS Certificate using ACM.
Prerequisites setup.
-1. Generate and issue a certificate in [ACM](https://console.aws.amazon.com/acm/home?#)
-for `teleport.acmeinc.com`, use email or DNS validation as appropriate and make sure
+1. Generate and issue a certificate in [ACM](https://console.aws.amazon.com/acm/home?#)
+for `teleport.acmeinc.com`, use email or DNS validation as appropriate and make sure
it’s approved successfully.
-#### Step 1: Subscribe to Teleport Community Edition
-Subscribe to the Teleport Community Edition on the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/B07FYTZB9B).
+#### Step 1: Subscribe to Teleport Community Edition
+Subscribe to the Teleport Community Edition on the [AWS Marketplace](https://aws.amazon.com/marketplace/pp/B07FYTZB9B).
1. Select 'Continue to Subscribe'
-2. Review the Terms and Conditions, and click `Continue to Configuration'
-3. Configure this software. Keep options as set, you might want to change region
+2. Review the Terms and Conditions, and click `Continue to Configuration`
+3. Configure this software. Keep options as set; you might want to change the region
to be in the same place as the rest of your infrastructure. Click Continue to Launch
-4. _Launch this software_ Under Choose Action, select Launch through EC2.
+4. _Launch this software._ Under Choose Action, select Launch through EC2.

@@ -187,10 +243,10 @@ We recommend using an `m4.large`, but a `t2.medium` should be good for POC testi

-4. Make sure to write appropriate values to `/etc/teleport.d/conf` via user-data
+4. Make sure to write appropriate values to `/etc/teleport.d/conf` via user-data
(using something like this):
-```json
+```bash
#!/bin/bash
cat >/etc/teleport.d/conf <
+
\ No newline at end of file
diff --git a/docs/4.1/img/cert_invalid.svg b/docs/4.1/img/cert_invalid.svg
new file mode 100644
index 0000000000000..efe0599300492
--- /dev/null
+++ b/docs/4.1/img/cert_invalid.svg
@@ -0,0 +1,226 @@
+
+
\ No newline at end of file
diff --git a/docs/4.1/img/cert_ok.svg b/docs/4.1/img/cert_ok.svg
new file mode 100644
index 0000000000000..b8b461cc47452
--- /dev/null
+++ b/docs/4.1/img/cert_ok.svg
@@ -0,0 +1,111 @@
+
+
\ No newline at end of file
diff --git a/docs/4.1/img/client_initiate.png b/docs/4.1/img/client_initiate.png
new file mode 100644
index 0000000000000..4bbfca29ced4d
Binary files /dev/null and b/docs/4.1/img/client_initiate.png differ
diff --git a/docs/4.1/img/client_initiate.svg b/docs/4.1/img/client_initiate.svg
new file mode 100644
index 0000000000000..117035af8f773
--- /dev/null
+++ b/docs/4.1/img/client_initiate.svg
@@ -0,0 +1,102 @@
+
+
\ No newline at end of file
diff --git a/docs/4.1/img/cluster_state.svg b/docs/4.1/img/cluster_state.svg
new file mode 100644
index 0000000000000..1f477b5c1c4e2
--- /dev/null
+++ b/docs/4.1/img/cluster_state.svg
@@ -0,0 +1,162 @@
+
+
\ No newline at end of file
diff --git a/docs/4.1/img/everything.svg b/docs/4.1/img/everything.svg
index 2ee575a765d33..563ea49a47cee 100644
--- a/docs/4.1/img/everything.svg
+++ b/docs/4.1/img/everything.svg
@@ -1,10 +1,10 @@
-