diff --git a/README.md b/README.md index 4549fc4..8e88a48 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,7 @@ Please review the [LICENSE](./LICENSE) in this repository for additional details Please note the following: - Zenhub ships with two backend technologies we call **Raptor** and **Toad**. -- Raptor and Toad are both made up of several microservices (eg. `raptor-admin`, `raptor-api`, `toad-webhook`, etc...) +- Raptor and Toad are both made up of several microservices (eg. `raptor-admin`, `raptor-api`, etc...) - Zenhub requires the use of two databases: **MongoDB** and **PostgreSQL**. - Zenhub requires the use of one instance of **Redis**. We recommend this instance is managed externally (internal for Zenhub as a VM) as it requires data persistence. - Zenhub requires the use of a message broker via **RabbitMQ**. diff --git a/k8s-cluster/README.md b/k8s-cluster/README.md index 9fed761..347eec9 100644 --- a/k8s-cluster/README.md +++ b/k8s-cluster/README.md @@ -55,6 +55,8 @@ - [8.4 Azure Active Directory](#84-azure-active-directory) - [8.5 LDAP](#85-ldap) - [8.6 SAML](#86-saml) +- [9. Integrations](#9-integrations) + - [9.1 Notion](#91-notion) ## 1. Getting Started @@ -95,9 +97,9 @@ You will need to [set up an OAuth App](https://docs.github.com/en/developers/app To get started with Zenhub, you must have an existing Kubernetes cluster set up. You should: -- Be using Kubernetes (>= 1.22). +- Be using Kubernetes (>= 1.26). - Have `kubectl` installed locally with credentials to access the cluster. -- Have [`kustomize`](https://kustomize.io/) installed locally (>= 4.5.3). +- Have [`kustomize`](https://kustomize.io/) installed locally (>= 4.5.7). - Create a dedicated Kubernetes namespace. Grant your user full access to that namespace. - Have the capability to pull Docker images from Zenhub's public Docker registry or have access to a private Docker registry where you can push images (and your cluster should have the ability to pull from that private registry). 
@@ -880,3 +882,9 @@ Of the authentication methods listed below, the only one that is enabled by defa Service Provider Attribute Name mappings for the following attributes: - **Email**: `email` - **Name**: `name` + +## 9. Integrations + +### 9.1 Notion + +Zenhub Enterprise for K8s can be integrated with Notion to allow users to preview Notion links within Zenhub Issues. This integration is disabled by default and can be enabled by following instructions for `notion` in the main `kustomization.yaml`. \ No newline at end of file diff --git a/k8s-cluster/base/devsite/apps_v1_deployment_devsite.yaml b/k8s-cluster/base/devsite/apps_v1_deployment_devsite.yaml index cf11a8b..85b38ef 100644 --- a/k8s-cluster/base/devsite/apps_v1_deployment_devsite.yaml +++ b/k8s-cluster/base/devsite/apps_v1_deployment_devsite.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.kubernetes.io/version: 4.0.2 + app.kubernetes.io/version: 4.1.0 generation: 1 labels: app.kubernetes.io/component: devsite @@ -21,7 +21,7 @@ spec: template: metadata: annotations: - app.kubernetes.io/version: 4.0.2 + app.kubernetes.io/version: 4.1.0 creationTimestamp: null labels: app.kubernetes.io/component: devsite diff --git a/k8s-cluster/base/gateway/configmaps.yaml b/k8s-cluster/base/gateway/configmaps.yaml index fefc21c..b378c34 100644 --- a/k8s-cluster/base/gateway/configmaps.yaml +++ b/k8s-cluster/base/gateway/configmaps.yaml @@ -181,7 +181,7 @@ data: raptoradmin: raptor-admin socket: toad-websocket webapp: kraken-webapp - webhook: toad-webhook + webhook: raptor-webhook kind: ConfigMap metadata: labels: diff --git a/k8s-cluster/base/kraken/configmaps.yaml b/k8s-cluster/base/kraken/configmaps.yaml index 96ca33a..7cd8a02 100644 --- a/k8s-cluster/base/kraken/configmaps.yaml +++ b/k8s-cluster/base/kraken/configmaps.yaml @@ -130,6 +130,7 @@ data: "isDev": false, "isEnterprise": true, "isLicenseGovernanceEnabled": false, + "isNotionIntegrationEnabled": false, "isTrackerEnabled": 
false, "isUploadFileToLocal": false, "loginURL": "https://%%subdomain_suffix%%.%%domain_tld%%/api/auth/github", diff --git a/k8s-cluster/base/non-dynamic/set-db-ca-mounts.yaml b/k8s-cluster/base/non-dynamic/set-db-ca-mounts.yaml index d3948d7..edf9fb4 100644 --- a/k8s-cluster/base/non-dynamic/set-db-ca-mounts.yaml +++ b/k8s-cluster/base/non-dynamic/set-db-ca-mounts.yaml @@ -72,31 +72,6 @@ spec: secret: secretName: postgres-ca-bundle ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - template: - spec: - containers: - - name: toad-webhook - volumeMounts: - - mountPath: /var/ca-bundle/mongo - name: mongo-ca-bundle - readOnly: true - - mountPath: /var/ca-bundle/postgres - name: postgres-ca-bundle - readOnly: true - volumes: - - name: mongo-ca-bundle - secret: - secretName: mongo-ca-bundle - - name: postgres-ca-bundle - secret: - secretName: postgres-ca-bundle - --- apiVersion: apps/v1 kind: Deployment @@ -269,6 +244,30 @@ spec: secret: secretName: postgres-ca-bundle +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: raptor-webhook +spec: + template: + spec: + containers: + - name: raptor-webhook + volumeMounts: + - mountPath: /var/ca-bundle/mongo + name: mongo-ca-bundle + readOnly: true + - mountPath: /var/ca-bundle/postgres + name: postgres-ca-bundle + readOnly: true + volumes: + - name: mongo-ca-bundle + secret: + secretName: mongo-ca-bundle + - name: postgres-ca-bundle + secret: + secretName: postgres-ca-bundle --- apiVersion: batch/v1 diff --git a/k8s-cluster/base/non-dynamic/set-deployments-resources.yaml b/k8s-cluster/base/non-dynamic/set-deployments-resources.yaml index 4da2fd2..6325ab7 100644 --- a/k8s-cluster/base/non-dynamic/set-deployments-resources.yaml +++ b/k8s-cluster/base/non-dynamic/set-deployments-resources.yaml @@ -150,16 +150,16 @@ spec: memory: 1G --- -# toad-webhook +# toad-worker apiVersion: apps/v1 kind: Deployment metadata: - name: toad-webhook + name: toad-worker spec: template: spec: containers: - 
- name: toad-webhook + - name: toad-worker resources: limits: cpu: 1000m @@ -169,16 +169,16 @@ spec: memory: 1G --- -# toad-worker +# raptor-webhook apiVersion: apps/v1 kind: Deployment metadata: - name: toad-worker + name: raptor-webhook spec: template: spec: containers: - - name: toad-worker + - name: raptor-webhook resources: limits: cpu: 1000m diff --git a/k8s-cluster/base/non-dynamic/set-raptor-buckets-vars.yaml b/k8s-cluster/base/non-dynamic/set-raptor-buckets-vars.yaml index ee43bff..6940591 100644 --- a/k8s-cluster/base/non-dynamic/set-raptor-buckets-vars.yaml +++ b/k8s-cluster/base/non-dynamic/set-raptor-buckets-vars.yaml @@ -1,3 +1,52 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: raptor-webhook + name: raptor-webhook +spec: + template: + spec: + containers: + - name: raptor-webhook + env: + - name: GITHUB_APP_SECRET + valueFrom: + secretKeyRef: + key: github_app_secret + name: configuration + - name: S3_PRIVATE_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: bucket_secret_access_key + name: configuration + - name: FILE_DOWNLOAD_URL + valueFrom: + configMapKeyRef: + key: zenhub_server_address + name: toad + - name: IS_UPLOAD_FILE_TO_LOCAL + valueFrom: + configMapKeyRef: + name: configuration + key: local_files + - name: S3_REGION + valueFrom: + configMapKeyRef: + key: bucket_region + name: configuration + - name: S3_PRIVATE_BUCKET + valueFrom: + configMapKeyRef: + key: files_bucket_name + name: configuration + - name: S3_PRIVATE_ACCESS_KEY_ID + valueFrom: + configMapKeyRef: + key: bucket_access_key_id + name: configuration +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/k8s-cluster/base/non-dynamic/set-var-mongo-shortindex.yaml b/k8s-cluster/base/non-dynamic/set-var-mongo-shortindex.yaml index d3afaa6..a71ccec 100644 --- a/k8s-cluster/base/non-dynamic/set-var-mongo-shortindex.yaml +++ b/k8s-cluster/base/non-dynamic/set-var-mongo-shortindex.yaml @@ -48,23 +48,6 @@ spec: name: 
configuration key: mongo_is_documentdb ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - template: - spec: - containers: - - name: toad-webhook - env: - - name: MONGO_IS_DOCUMENTDB - valueFrom: - configMapKeyRef: - name: configuration - key: mongo_is_documentdb - --- apiVersion: apps/v1 kind: Deployment diff --git a/k8s-cluster/base/pgbouncer/apps_v1_deployment_pgbouncer.yaml b/k8s-cluster/base/pgbouncer/apps_v1_deployment_pgbouncer.yaml index 272b3ea..79dd346 100644 --- a/k8s-cluster/base/pgbouncer/apps_v1_deployment_pgbouncer.yaml +++ b/k8s-cluster/base/pgbouncer/apps_v1_deployment_pgbouncer.yaml @@ -32,7 +32,7 @@ spec: value: verify-full - name: SERVER_TLS_CA_FILE value: /var/ca-bundle/postgres/postgres-ca.pem - image: us.gcr.io/zenhub-public/pgbouncer:zhe-4.0.2 + image: us.gcr.io/zenhub-public/pgbouncer:zhe-4.1.0 imagePullPolicy: IfNotPresent lifecycle: preStop: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-admin.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-admin.yaml index a26efc4..b8a4559 100644 --- a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-admin.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-admin.yaml @@ -249,16 +249,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -450,11 +440,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: @@ -545,7 +530,7 @@ spec: resources: limits: cpu: 987m - memory: 1000Mi + memory: 1500Mi requests: cpu: 400m memory: 750Mi diff --git 
a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api-public.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api-public.yaml index 988f58a..fdd0e95 100644 --- a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api-public.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api-public.yaml @@ -241,16 +241,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -442,11 +432,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api.yaml index 3e69887..6724bd9 100644 --- a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-api.yaml @@ -188,11 +188,23 @@ spec: configMapKeyRef: key: otel_log_level name: raptor - - name: AUTH0_LOG_STREAM_TOKEN + - name: IS_NOTION_INTEGRATION_ENABLED + valueFrom: + configMapKeyRef: + key: notion_enabled + name: configuration + optional: true + - name: NOTION_CLIENT_ID + valueFrom: + configMapKeyRef: + key: notion_client_id + name: configuration + optional: true + - name: NOTION_CLIENT_SECRET valueFrom: secretKeyRef: - key: auth0_log_stream_token - name: raptor-api + key: notion_client_secret + name: configuration optional: true - name: RAILS_MAX_THREADS valueFrom: @@ -355,16 +367,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - 
key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -446,11 +448,6 @@ spec: configMapKeyRef: key: google_oauth2_client_id name: raptor - - name: NOTION_CLIENT_ID - valueFrom: - configMapKeyRef: - key: notion_client_id - name: raptor - name: X_FORWARDED_FOR_TRUSTED_PROXIES valueFrom: configMapKeyRef: @@ -556,11 +553,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: @@ -622,11 +614,6 @@ spec: secretKeyRef: key: google_oauth2_client_secret name: raptor - - name: NOTION_CLIENT_SECRET - valueFrom: - secretKeyRef: - key: notion_client_secret - name: raptor image: raptor-backend imagePullPolicy: IfNotPresent livenessProbe: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-cable.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-cable.yaml index 1ebee1c..8823c8c 100644 --- a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-cable.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-cable.yaml @@ -234,16 +234,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -431,11 +421,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker-default.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker-default.yaml index 8b99b81..9d305b4 100644 --- 
a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker-default.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker-default.yaml @@ -236,16 +236,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -437,11 +427,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker.yaml index 52a0e1b..8bbf233 100644 --- a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker.yaml +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-sidekiq-worker.yaml @@ -234,16 +234,6 @@ spec: configMapKeyRef: key: auth_jwks_url name: raptor - - name: AUTH0_TENANT - valueFrom: - configMapKeyRef: - key: auth0_tenant - name: raptor - - name: AUTH0_DB_CONNECTION_NAME - valueFrom: - configMapKeyRef: - key: auth0_db_connection_name - name: raptor - name: MAILGUN_DOMAIN valueFrom: configMapKeyRef: @@ -435,11 +425,6 @@ spec: secretKeyRef: key: hubspot_access_token name: raptor - - name: AUTH_ZENHUB_APP_CLIENT_ID - valueFrom: - secretKeyRef: - key: auth_zenhub_app_client_id - name: raptor - name: AUTH_MANAGEMENT_API_CLIENT_ID valueFrom: secretKeyRef: diff --git a/k8s-cluster/base/raptor/apps_v1_deployment_raptor-webhook.yaml b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-webhook.yaml new file mode 100644 index 0000000..c23209b --- /dev/null +++ b/k8s-cluster/base/raptor/apps_v1_deployment_raptor-webhook.yaml @@ -0,0 +1,519 @@ +apiVersion: apps/v1 +kind: Deployment 
+metadata: + labels: + app.kubernetes.io/component: raptor-webhook + name: raptor-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: raptor-webhook + template: + metadata: + labels: + app.kubernetes.io/component: raptor-webhook + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/component: raptor-webhook + namespaces: [] + topologyKey: kubernetes.io/hostname + weight: 2 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/component: raptor-webhook + namespaces: [] + topologyKey: topology.kubernetes.io/zone + weight: 2 + containers: + - args: + - bundle + - exec + - rails + - s + - -b + - 0.0.0.0 + env: + - name: DATABASE_URL + valueFrom: + secretKeyRef: + key: postgres_url + name: configuration + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + key: raptor_redis_password + name: internal + - name: CACHE_REDIS_URL + value: redis://default:$(REDIS_PASSWORD)@cache-raptor-redis-master:6379/0 + - name: ENTERPRISE_LICENSE_TOKEN + valueFrom: + secretKeyRef: + key: enterprise_license_token + name: configuration + - name: CABLE_URL + value: raptor-cable + - name: TOAD_APP_URL + value: toad-api + - name: GITHUB_APP_ID + valueFrom: + configMapKeyRef: + key: github_app_id + name: configuration + - name: OTEL_LOG_LEVEL + valueFrom: + configMapKeyRef: + key: otel_log_level + name: raptor + - name: RAILS_MAX_THREADS + valueFrom: + configMapKeyRef: + key: rails_max_threads + name: raptor-webhook + - name: WEB_CONCURRENCY + valueFrom: + configMapKeyRef: + key: web_concurrency + name: raptor-webhook + - name: RUBYOPT + value: -W0 + - name: HOST_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: http://$(HOST_IP):4318 + - name: APP_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/component'] + - name: APP_DEPLOY_ENV 
+ valueFrom: + fieldRef: + fieldPath: metadata.labels['app.kubernetes.io/instance'] + - name: OTEL_SERVICE_NAME + value: $(APP_DEPLOY_ENV)-$(APP_SERVICE) + - name: RAILS_ENV + valueFrom: + configMapKeyRef: + key: rails_env + name: raptor + - name: RAILS_LOG_TO_STDOUT + valueFrom: + configMapKeyRef: + key: rails_log_to_stdout + name: raptor + - name: LOG_JSON + valueFrom: + configMapKeyRef: + key: log_json + name: raptor + - name: GITHUB_HTML_URL + valueFrom: + configMapKeyRef: + key: github_html_url + name: raptor + - name: GITHUB_SERVER_ADDRESS + valueFrom: + configMapKeyRef: + key: github_html_url + name: raptor + - name: GITHUB_API_URL + valueFrom: + configMapKeyRef: + key: github_api_url + name: raptor + - name: GITHUB_GRAPHQL_URL + valueFrom: + configMapKeyRef: + key: github_graphql_url + name: raptor + - name: SENTRY_DSN + valueFrom: + configMapKeyRef: + key: sentry_dsn + name: raptor + - name: SENTRY_ENVIRONMENT + valueFrom: + configMapKeyRef: + key: sentry_environment + name: raptor + - name: NEW_RELIC_AGENT_ENABLED + valueFrom: + configMapKeyRef: + key: new_relic_agent_enabled + name: raptor + - name: NEW_RELIC_APP_NAME + valueFrom: + configMapKeyRef: + key: new_relic_app_name + name: raptor + - name: NEW_RELIC_APPLICATION_LOGGING_ENABLED + valueFrom: + configMapKeyRef: + key: new_relic_application_logging_enabled + name: raptor + optional: true + - name: NEW_RELIC_APPLICATION_LOGGING_FORWARDING_ENABLED + valueFrom: + configMapKeyRef: + key: new_relic_application_logging_forwarding_enabled + name: raptor + optional: true + - name: DISABLE_PREPARE_DB + valueFrom: + configMapKeyRef: + key: disable_prepare_db + name: raptor + - name: GITHUB_WEBHOOK_ENABLED + valueFrom: + configMapKeyRef: + key: github_webhook_enabled + name: raptor + - name: ZENHUB_WEBHOOK_DOMAIN_V2 + valueFrom: + configMapKeyRef: + key: zenhub_webhook_domain_v2 + name: raptor + - name: LD_OFFLINE + valueFrom: + configMapKeyRef: + key: ld_offline + name: raptor + - name: IS_ENTERPRISE + 
valueFrom: + configMapKeyRef: + key: is_enterprise + name: raptor + - name: CABLE_ALLOWED_ORIGINS + valueFrom: + configMapKeyRef: + key: cable_allowed_origins + name: raptor + - name: ACTION_CABLE_WORKERS_POOL_SIZE + valueFrom: + configMapKeyRef: + key: action_cable_workers_pool_size + name: raptor + - name: HUBSPOT_PORTAL_ID + valueFrom: + configMapKeyRef: + key: hubspot_portal_id + name: raptor + - name: HUBSPOT_PLATFORM_SIGNUP_FORM_GUID + valueFrom: + configMapKeyRef: + key: hubspot_platform_signup_form_guid + name: raptor + - name: MIXPANEL_PROJECT_ID + valueFrom: + configMapKeyRef: + key: mixpanel_project_id + name: raptor + - name: HUBSPOT_PAID_USER_FORM_GUID + valueFrom: + configMapKeyRef: + key: hubspot_paid_user_form_guid + name: raptor + - name: AUTH_ISSUER_URLS + valueFrom: + configMapKeyRef: + key: auth_issuer_urls + name: raptor + - name: AUTH_JWKS_URL + valueFrom: + configMapKeyRef: + key: auth_jwks_url + name: raptor + - name: MAILGUN_DOMAIN + valueFrom: + configMapKeyRef: + key: mailgun_domain + name: raptor + - name: WEBAPP_URL + valueFrom: + configMapKeyRef: + key: webapp_url + name: raptor + - name: STRIPE_TRIAL_PRICE_ID + valueFrom: + configMapKeyRef: + key: stripe_trial_price_id + name: raptor + - name: STRIPE_MONTHLY_PRICE_ID + valueFrom: + configMapKeyRef: + key: stripe_monthly_price_id + name: raptor + - name: STRIPE_YEARLY_PRICE_ID + valueFrom: + configMapKeyRef: + key: stripe_yearly_price_id + name: raptor + - name: S3_ACCESS_KEY_ID + valueFrom: + configMapKeyRef: + key: s3_access_key_id + name: raptor + - name: S3_BUCKET + valueFrom: + configMapKeyRef: + key: s3_bucket + name: raptor + - name: S3_PRIVATE_ACCESS_KEY_ID + valueFrom: + configMapKeyRef: + key: s3_private_access_key_id + name: raptor + - name: S3_PRIVATE_BUCKET + valueFrom: + configMapKeyRef: + key: s3_private_bucket + name: raptor + - name: S3_REGION + valueFrom: + configMapKeyRef: + key: s3_region + name: raptor + - name: FILES_UPLOAD_PATH + valueFrom: + configMapKeyRef: + 
key: files_upload_path + name: raptor + optional: true + - name: IS_UPLOAD_FILE_TO_LOCAL + valueFrom: + configMapKeyRef: + key: is_upload_file_to_local + name: raptor + - name: FILE_DOWNLOAD_URL + valueFrom: + configMapKeyRef: + key: file_download_url + name: raptor + - name: TOAD_PUBLIC_URL + valueFrom: + configMapKeyRef: + key: toad_public_url + name: raptor + - name: OPENAI_ORGANIZATION_ID + valueFrom: + configMapKeyRef: + key: openai_organization_id + name: raptor + - name: GOOGLE_OAUTH2_CLIENT_ID + valueFrom: + configMapKeyRef: + key: google_oauth2_client_id + name: raptor + - name: NOTION_CLIENT_ID + valueFrom: + configMapKeyRef: + key: notion_client_id + name: raptor + - name: X_FORWARDED_FOR_TRUSTED_PROXIES + valueFrom: + configMapKeyRef: + key: x_forwarded_for_trusted_proxies + name: raptor + optional: true + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SIDEKIQ_REDIS_URL + valueFrom: + secretKeyRef: + key: redis_url + name: configuration + - name: GRAPHQL_REDIS_URL + valueFrom: + secretKeyRef: + key: graphql_redis_url + name: configuration + - name: SECRET_KEY_BASE + valueFrom: + secretKeyRef: + key: secret_key_base + name: internal + - name: LOCKBOX_MASTER_KEY + valueFrom: + secretKeyRef: + key: lockbox_master_key + name: internal + - name: GITHUB_TOKEN_VALUE_KEY + valueFrom: + secretKeyRef: + key: github_token_value_key + name: internal + - name: GITHUB_WEBHOOKS_SECRET + valueFrom: + secretKeyRef: + key: github_webhooks_secret + name: internal + - name: INTERNAL_AUTH_TOKEN + valueFrom: + secretKeyRef: + key: internal_auth_token + name: internal + - name: LD_SDK_KEY + valueFrom: + secretKeyRef: + key: ld_sdk_key + name: raptor + - name: NEW_RELIC_LICENSE_KEY + valueFrom: + secretKeyRef: + key: new_relic_license_key + name: raptor + - name: TOAD_MONGO_URL + valueFrom: + secretKeyRef: + key: mongo_url + name: configuration + - name: TOAD_RABBITMQ_URL + valueFrom: + secretKeyRef: + key: rabbitmq_url + name: 
configuration + - name: CABLE_REDIS_URL + valueFrom: + secretKeyRef: + key: cable_redis_url + name: configuration + - name: MIXPANEL_PROJECT_TOKEN + valueFrom: + secretKeyRef: + key: mixpanel_token + name: raptor + - name: MIXPANEL_SERVICE_ACCOUNT_USERNAME + valueFrom: + secretKeyRef: + key: mixpanel_service_account_username + name: raptor + - name: MIXPANEL_SERVICE_ACCOUNT_SECRET + valueFrom: + secretKeyRef: + key: mixpanel_service_account_secret + name: raptor + - name: FRESHWORKS_API_KEY + valueFrom: + secretKeyRef: + key: freshworks_api_key + name: raptor + - name: FRESH_SUCCESS_API_KEY + valueFrom: + secretKeyRef: + key: fresh_success_api_key + name: raptor + - name: PRODUCT_BOARD_KEY + valueFrom: + secretKeyRef: + key: product_board_key + name: raptor + - name: HUBSPOT_ACCESS_TOKEN + valueFrom: + secretKeyRef: + key: hubspot_access_token + name: raptor + - name: AUTH_MANAGEMENT_API_CLIENT_ID + valueFrom: + secretKeyRef: + key: auth_management_api_client_id + name: raptor + - name: AUTH_MANAGEMENT_API_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: auth_management_api_client_secret + name: raptor + - name: MAILGUN_API_KEY + valueFrom: + secretKeyRef: + key: mailgun_api_key + name: raptor + - name: STRIPE_API_KEY + valueFrom: + secretKeyRef: + key: stripe_api_key + name: raptor + - name: STRIPE_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + key: stripe_webhook_secret + name: raptor + - name: DEVISE_JWT_SECRET_KEY + valueFrom: + secretKeyRef: + key: devise_jwt_secret_key + name: raptor + - name: S3_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: s3_secret_access_key + name: raptor + - name: S3_PRIVATE_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: s3_private_secret_access_key + name: raptor + - name: OPENAI_API_KEY + valueFrom: + secretKeyRef: + key: openai_api_key + name: raptor + - name: GITHUB_APP_SECRET + valueFrom: + secretKeyRef: + key: github_app_secret + name: configuration + - name: GLOBAL_GITHUB_ADMIN_TOKEN + valueFrom: + secretKeyRef: + 
key: global_github_admin_token + name: global-github-admin-token-secret + optional: true + - name: GOOGLE_OAUTH2_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: google_oauth2_client_secret + name: raptor + - name: NOTION_CLIENT_SECRET + valueFrom: + secretKeyRef: + key: notion_client_secret + name: raptor + image: raptor-backend + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /ping/all + port: http + initialDelaySeconds: 40 + periodSeconds: 10 + timeoutSeconds: 5 + name: raptor-webhook + ports: + - containerPort: 5000 + name: http + protocol: TCP + readinessProbe: + httpGet: + path: /ping/all + port: http + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + resources: + limits: + cpu: 700m + memory: 2000Mi + requests: + cpu: 600m + memory: 1500Mi diff --git a/k8s-cluster/base/raptor/autoscaling_v2_horizontalpodautoscaler_raptor-webhook.yaml b/k8s-cluster/base/raptor/autoscaling_v2_horizontalpodautoscaler_raptor-webhook.yaml new file mode 100644 index 0000000..b08aff5 --- /dev/null +++ b/k8s-cluster/base/raptor/autoscaling_v2_horizontalpodautoscaler_raptor-webhook.yaml @@ -0,0 +1,20 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + labels: + app.kubernetes.io/component: raptor-webhook + name: raptor-webhook +spec: + maxReplicas: 1 + metrics: + - resource: + name: cpu + target: + averageUtilization: 80 + type: Utilization + type: Resource + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: raptor-webhook diff --git a/k8s-cluster/base/raptor/batch_v1_job_raptor-db-migrate.yaml b/k8s-cluster/base/raptor/batch_v1_job_raptor-db-migrate.yaml index 9abe364..99f5c19 100644 --- a/k8s-cluster/base/raptor/batch_v1_job_raptor-db-migrate.yaml +++ b/k8s-cluster/base/raptor/batch_v1_job_raptor-db-migrate.yaml @@ -18,7 +18,7 @@ spec: - name: MIGRATIONS_DATABASE_URL valueFrom: secretKeyRef: - key: postgres_url + key: pgbouncer_url name: configuration - name: DATABASE_URL valueFrom: diff 
--git a/k8s-cluster/base/raptor/configmaps.yaml b/k8s-cluster/base/raptor/configmaps.yaml index 62afe30..37d5583 100644 --- a/k8s-cluster/base/raptor/configmaps.yaml +++ b/k8s-cluster/base/raptor/configmaps.yaml @@ -62,6 +62,14 @@ metadata: name: raptor-sidekiq --- apiVersion: v1 +data: + rails_max_threads: "5" + web_concurrency: "4" +kind: ConfigMap +metadata: + name: raptor-webhook +--- +apiVersion: v1 data: action_cable_workers_pool_size: "2" auth_issuer_urls: "" diff --git a/k8s-cluster/base/raptor/kustomization.yaml b/k8s-cluster/base/raptor/kustomization.yaml index 18cc281..58cc91e 100644 --- a/k8s-cluster/base/raptor/kustomization.yaml +++ b/k8s-cluster/base/raptor/kustomization.yaml @@ -7,9 +7,11 @@ resources: - apps_v1_deployment_raptor-cable.yaml - apps_v1_deployment_raptor-sidekiq-worker-default.yaml - apps_v1_deployment_raptor-sidekiq-worker.yaml +- apps_v1_deployment_raptor-webhook.yaml - autoscaling_v2_horizontalpodautoscaler_raptor-api-public.yaml - autoscaling_v2_horizontalpodautoscaler_raptor-api.yaml - autoscaling_v2_horizontalpodautoscaler_raptor-cable.yaml +- autoscaling_v2_horizontalpodautoscaler_raptor-webhook.yaml - batch_v1_job_raptor-db-migrate.yaml - configmaps.yaml - persistentvolume.yaml diff --git a/k8s-cluster/base/raptor/service.yaml b/k8s-cluster/base/raptor/service.yaml index 3e3e45f..9303f4f 100644 --- a/k8s-cluster/base/raptor/service.yaml +++ b/k8s-cluster/base/raptor/service.yaml @@ -63,3 +63,19 @@ spec: app.kubernetes.io/component: raptor-cable type: ClusterIP --- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: raptor-webhook + name: raptor-webhook +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + selector: + app.kubernetes.io/component: raptor-webhook + type: ClusterIP +--- diff --git a/k8s-cluster/base/toad/apps_v1_deployment_toad-api.yaml b/k8s-cluster/base/toad/apps_v1_deployment_toad-api.yaml index 44bff7b..09d39c2 100644 --- 
a/k8s-cluster/base/toad/apps_v1_deployment_toad-api.yaml +++ b/k8s-cluster/base/toad/apps_v1_deployment_toad-api.yaml @@ -367,26 +367,6 @@ spec: secretKeyRef: key: chart_mogul_api_token name: toad - - name: AUTH0_AUDIENCE - valueFrom: - configMapKeyRef: - key: auth0_audience - name: toad - - name: AUTH0_CLIENT_ID - valueFrom: - configMapKeyRef: - key: auth0_client_id - name: toad - - name: AUTH0_DOMAIN - valueFrom: - configMapKeyRef: - key: auth0_domain - name: toad - - name: AUTH0_ISSUER - valueFrom: - configMapKeyRef: - key: auth0_issuer - name: toad image: toad-backend imagePullPolicy: IfNotPresent livenessProbe: diff --git a/k8s-cluster/base/toad/apps_v1_deployment_toad-cron.yaml b/k8s-cluster/base/toad/apps_v1_deployment_toad-cron.yaml index d4cf3e0..cb14748 100644 --- a/k8s-cluster/base/toad/apps_v1_deployment_toad-cron.yaml +++ b/k8s-cluster/base/toad/apps_v1_deployment_toad-cron.yaml @@ -350,26 +350,6 @@ spec: secretKeyRef: key: chart_mogul_api_token name: toad - - name: AUTH0_AUDIENCE - valueFrom: - configMapKeyRef: - key: auth0_audience - name: toad - - name: AUTH0_CLIENT_ID - valueFrom: - configMapKeyRef: - key: auth0_client_id - name: toad - - name: AUTH0_DOMAIN - valueFrom: - configMapKeyRef: - key: auth0_domain - name: toad - - name: AUTH0_ISSUER - valueFrom: - configMapKeyRef: - key: auth0_issuer - name: toad image: toad-backend imagePullPolicy: IfNotPresent name: toad-cron diff --git a/k8s-cluster/base/toad/apps_v1_deployment_toad-websocket.yaml b/k8s-cluster/base/toad/apps_v1_deployment_toad-websocket.yaml index b99c523..9d06a28 100644 --- a/k8s-cluster/base/toad/apps_v1_deployment_toad-websocket.yaml +++ b/k8s-cluster/base/toad/apps_v1_deployment_toad-websocket.yaml @@ -354,26 +354,6 @@ spec: secretKeyRef: key: chart_mogul_api_token name: toad - - name: AUTH0_AUDIENCE - valueFrom: - configMapKeyRef: - key: auth0_audience - name: toad - - name: AUTH0_CLIENT_ID - valueFrom: - configMapKeyRef: - key: auth0_client_id - name: toad - - name: 
AUTH0_DOMAIN - valueFrom: - configMapKeyRef: - key: auth0_domain - name: toad - - name: AUTH0_ISSUER - valueFrom: - configMapKeyRef: - key: auth0_issuer - name: toad image: toad-backend imagePullPolicy: IfNotPresent livenessProbe: diff --git a/k8s-cluster/base/toad/apps_v1_deployment_toad-worker.yaml b/k8s-cluster/base/toad/apps_v1_deployment_toad-worker.yaml index 4698f76..6e429cb 100644 --- a/k8s-cluster/base/toad/apps_v1_deployment_toad-worker.yaml +++ b/k8s-cluster/base/toad/apps_v1_deployment_toad-worker.yaml @@ -350,26 +350,6 @@ spec: secretKeyRef: key: chart_mogul_api_token name: toad - - name: AUTH0_AUDIENCE - valueFrom: - configMapKeyRef: - key: auth0_audience - name: toad - - name: AUTH0_CLIENT_ID - valueFrom: - configMapKeyRef: - key: auth0_client_id - name: toad - - name: AUTH0_DOMAIN - valueFrom: - configMapKeyRef: - key: auth0_domain - name: toad - - name: AUTH0_ISSUER - valueFrom: - configMapKeyRef: - key: auth0_issuer - name: toad image: toad-backend imagePullPolicy: IfNotPresent name: toad-worker diff --git a/k8s-cluster/base/toad/configmaps.yaml b/k8s-cluster/base/toad/configmaps.yaml index bc4f6ed..6e42659 100644 --- a/k8s-cluster/base/toad/configmaps.yaml +++ b/k8s-cluster/base/toad/configmaps.yaml @@ -1,12 +1,4 @@ apiVersion: v1 -data: - web_concurrency: "4" - webhook_port: "3031" -kind: ConfigMap -metadata: - name: toad-webhook ---- -apiVersion: v1 data: attribution_app_id: "" auth0_audience: "" diff --git a/k8s-cluster/base/toad/kustomization.yaml b/k8s-cluster/base/toad/kustomization.yaml index 52c8e7d..90715f6 100644 --- a/k8s-cluster/base/toad/kustomization.yaml +++ b/k8s-cluster/base/toad/kustomization.yaml @@ -3,11 +3,9 @@ kind: Kustomization resources: - apps_v1_deployment_toad-api.yaml - apps_v1_deployment_toad-cron.yaml -- apps_v1_deployment_toad-webhook.yaml - apps_v1_deployment_toad-websocket.yaml - apps_v1_deployment_toad-worker.yaml - autoscaling_v2_horizontalpodautoscaler_toad-api.yaml -- 
autoscaling_v2_horizontalpodautoscaler_toad-webhook.yaml - autoscaling_v2_horizontalpodautoscaler_toad-websocket.yaml - configmaps.yaml - persistentvolume.yaml diff --git a/k8s-cluster/base/toad/service.yaml b/k8s-cluster/base/toad/service.yaml index ae7ba37..8894b87 100644 --- a/k8s-cluster/base/toad/service.yaml +++ b/k8s-cluster/base/toad/service.yaml @@ -14,13 +14,6 @@ metadata: --- apiVersion: v1 kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: toad-webhook - name: toad-automount-sa-token-false-webhook ---- -apiVersion: v1 -kind: ServiceAccount metadata: labels: app.kubernetes.io/component: toad-websocket @@ -52,22 +45,6 @@ spec: --- apiVersion: v1 kind: Service -metadata: - labels: - app.kubernetes.io/component: toad-webhook - name: toad-webhook -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: http - selector: - app.kubernetes.io/component: toad-webhook - type: ClusterIP ---- -apiVersion: v1 -kind: Service metadata: labels: app.kubernetes.io/component: toad-websocket diff --git a/k8s-cluster/configmap-generator.sh b/k8s-cluster/configmap-generator.sh index e42f02f..e4052a6 100644 --- a/k8s-cluster/configmap-generator.sh +++ b/k8s-cluster/configmap-generator.sh @@ -20,6 +20,7 @@ export `grep -hir "w3id_enabled=" kustomization.yaml | awk '{print $2}'` export `grep -hir "azure_ad_enabled=" kustomization.yaml | awk '{print $2}'` export `grep -hir "ldap_enabled=" kustomization.yaml | awk '{print $2}'` export `grep -hir "saml_enabled=" kustomization.yaml | awk '{print $2}'` +export `grep -hir "notion_enabled=" kustomization.yaml | awk '{print $2}'` zhe_hostname="$subdomain_suffix.$domain_tld" https_zhe_hostname="https://$zhe_hostname" @@ -32,6 +33,7 @@ auth_options_w3id=$w3id_enabled auth_options_azure_ad=$azure_ad_enabled auth_options_ldap=$ldap_enabled auth_options_saml=$saml_enabled +feat_options_notion=$notion_enabled function is_gnu_sed(){ sed --version >/dev/null 2>&1 @@ -80,3 +82,6 @@ sed_wrap "s/\"SAML\": 
.*,/\"SAML\": $auth_options_saml,/g" base/kraken/configmap # Adding a comma will break firefox extension publishing https://github.com/ZenHubHQ/devops/issues/1889 # If adding a new auth type, keep Zenhub at the bottom of the list sed_wrap "s/\"Zenhub\": .*/\"Zenhub\": $auth_options_email_pw/g" base/kraken/configmaps.yaml + +# Replace integration values in base/kraken/configmaps.yaml +sed_wrap "s/\"isNotionIntegrationEnabled\": .*/\"isNotionIntegrationEnabled\": $feat_options_notion,/g" base/kraken/configmaps.yaml \ No newline at end of file diff --git a/k8s-cluster/deployment-checklist.txt b/k8s-cluster/deployment-checklist.txt index 0705f80..f55c614 100644 --- a/k8s-cluster/deployment-checklist.txt +++ b/k8s-cluster/deployment-checklist.txt @@ -11,9 +11,9 @@ - [ ] Configure an OAuth App in GitHub Enterprise Server #### Kubernetes Requirements -- [ ] Obtain access to a Kubernetes cluster running K8s v1.22 or greater +- [ ] Obtain access to a Kubernetes cluster running K8s v1.26 or greater - [ ] Install `kubectl` locally -- [ ] Install `kustomize` v4.5.3 or greater locally +- [ ] Install `kustomize` v4.5.7 or greater locally - [ ] Create a dedicated namespace in the cluster for Zenhub - [ ] Have the capability to pull the Docker images from Zenhub's public Docker registry or have access to a private Docker registry where you can push images (and the cluster should have the ability to pull from that private registry) diff --git a/k8s-cluster/kustomization.yaml b/k8s-cluster/kustomization.yaml index 4380d05..c785eaf 100644 --- a/k8s-cluster/kustomization.yaml +++ b/k8s-cluster/kustomization.yaml @@ -18,7 +18,8 @@ patchesStrategicMerge: # - options/pullpolicy/always.yaml #! Sets the image pull policy of deployments to Always # - options/scaling/deployments-scaling.yaml #! Change number of replicas and scaling # - options/scaling/deployments-resources-small.yaml #! 
Enable to use minimum required resources, or customize them yourself -# - options/pgbouncer/pgbouncer-config.yaml #! Enable if custom configuration is required for pgbouncer (connection and pool limits, etc). +# - options/pgbouncer/pgbouncer-config.yaml #! Enable if password_encryption is set to scram-sha-256 on your postgres database, + #! or if you need to change the default pool size and connection limits. #! See documentation in options/pgbouncer/README.md to make configuration changes. # [EDIT] (Optional) specify any labels you want applied to the deployments. @@ -29,32 +30,32 @@ commonLabels: app.kubernetes.io/managed-by: kustomize commonAnnotations: - app.kubernetes.io/version: 4.0.2 + app.kubernetes.io/version: 4.1.0 # [EDIT] If your cluster does not have access to our public Docker registry, # update the `newName` values with paths to your own private Docker registry. images: - name: kraken-webapp newName: us.gcr.io/zenhub-public/kraken-webapp - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: kraken-extension newName: us.gcr.io/zenhub-public/kraken-extension - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: kraken-zhe-admin newName: us.gcr.io/zenhub-public/kraken-zhe-admin - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: raptor-backend newName: us.gcr.io/zenhub-public/raptor-backend - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: toad-backend newName: us.gcr.io/zenhub-public/toad-backend - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: sanitycheck newName: us.gcr.io/zenhub-public/sanitycheck - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: devsite newName: us.gcr.io/zenhub-public/devsite - newTag: zhe-4.0.2 + newTag: zhe-4.1.0 - name: busybox newName: docker.io/library/busybox newTag: latest @@ -160,7 +161,12 @@ configMapGenerator: - saml_enabled=false # - saml_idp_metadata_url= # Metadata URL for your Zenhub SAML App # - saml_sp_entity_id= # Unique Service Provider ID for your Zenhub SAML App - + + # Notion integration configuration + # [EDIT] (optional) Change 
notion_enabled to true, then uncomment and fill out the rest of the options to enable + - notion_enabled=false + # - notion_client_id= # e.g. 1234-abcd-1234567890ab-12345678-abcd + # Do not create/edit zhe-urls.env file. This is managed by the configmap-generator.sh script. - name: configuration behavior: merge @@ -199,6 +205,9 @@ secretGenerator: # - azure_ad_client_secret= # e.g. abcd12~efgh567ijkl890mnopq123rstu456vwxyz # [EDIT] (optional) Uncomment and fill to enable LDAP authentication # - ldap_bind_password= + # [EDIT] (optional) Uncomment and fill to enable Notion integration + # - notion_client_secret= # e.g. secret_12345678abcd1234abcd1234567890ab + # [EDIT] Required certificate for Postgres DB SSL/TLS. # [NOTE] Your provided certificate must be named postgres-ca.pem @@ -216,6 +225,7 @@ secretGenerator: # [EDIT] If you need admin access to PgBouncer to view pools, connections, etc. and the username that you use to connect to PostgreSQL is not named "postgres". # [EDIT] Or if you just need to tune the default PgBouncer pool size and connection limits to better suit your database. + # [EDIT] Or if password_encryption is set to scram-sha-256 on your postgres database and md5 authentication cannot be used. # [IMPORTANT NOTE] See documentation in options/pgbouncer/README.md to make configuration changes. # - name: pgbouncer-userlist # files: diff --git a/k8s-cluster/options/pgbouncer/README.md b/k8s-cluster/options/pgbouncer/README.md index 4442474..89dec18 100644 --- a/k8s-cluster/options/pgbouncer/README.md +++ b/k8s-cluster/options/pgbouncer/README.md @@ -1,48 +1,72 @@ -# PGBouncer Configuration +# PgBouncer Configuration for Zenhub Enterprise -PGBouncer is an open-source, lightweight, single-binary connection pooler for PostgreSQL. +PgBouncer is an open-source, lightweight, single-binary connection pooler for PostgreSQL. It helps PostgresSQL run at scale with a large number of client connections. 
-## Prerequisites +## Overview -- Be ready with your Postgres database user connection information. +If this is your first time setting up this pgbouncer configuration, please follow the instructions in: [Configuration for Postgres using scram-sha-256](#configuration-for-postgres-using-scram-sha-256). -## How to Configure +If you are upgrading your postgres from a version that supports md5 authentication to a version that uses scram-sha-256 authentication, please follow the instructions in: [How to Upgrade from MD5 to SCRAM-SHA-256 Encryption](#how-to-upgrade-from-md5-to-scram-sha-256-encryption). -### userlist.txt +## Configuration for Postgres Using scram-sha-256 -This configuration file holds the username and md5 hashed password+username combinations of the user(s) that have access to the PostgreSQL database to authenticate to the database as well as to connect to the internal 'pgbouncer' database that is used to manage PgBouncer once it is running. +You will need to fill out and apply the userlist.txt and pgbouncer.ini files in order to configure PgBouncer. The instructions below will guide you through the process and show you how to setup the configuration files to work with scram-sha-256 authentication on your Postgres database. -The file should look similar to the following when filled out: +### Prerequisites -```bash -"zenhub" "md5asdasd" -``` +- Be ready with your Postgres database user connection information. +- You are running Postgres version that uses password_encryption = scram-sha-256 + +### Configuring the userlist.txt file -The example above is for a Postgres user with the name 'zenhub'. +This configuration file holds the username and passwords of the user(s) that have access to the PostgreSQL database to authenticate to the database as well as to connect to the internal 'pgbouncer' database that is used to manage PgBouncer once it is running.
-Here are 3 ways you can create the md5 hash, where the username is 'zenhub' and the password is 'password123' as an example: +The file will need to hold two users: -#### Linux +1) `my-pgbouncer-username` user, which is dedicated for PgBouncer -> Postgres communication + - This is the same user that should be entered in **pgbouncer.ini** under the `[databases]` section + - This password must be entered in plain-text in the file in order to use SCRAM authentication to Postgres [[reference](https://postgrespro.com/docs/postgrespro/9.6/pgbouncer#:~:text=To%20be%20able%20to%20use%20SCRAM%20on%20server%20connections%2C%20use%20plain%2Dtext%20passwords.)] +2) `my-postgres-username` user, which the Zenhub application stores and uses for Application -> PgBouncer or Postgres + - This is the same user that should be used in `pgbouncer_url` and `postgres_url` in the main **kustomization.yaml** file + - This password should be entered in SCRAM-SHA-256 format in the file + +If you do not have access to the `pg_authid` table, you may use the provided scram.py script to generate the SCRAM-SHA-256 hash for your `my-postgres-username` user: ```bash -echo -n "md5"; echo -n "password123zenhub" | md5sum | awk '{print $1}' +chmod 700 scram.py +./scram.py ``` -#### MacOS +Once you have your SCRAM-SHA-256 hash, you may add it to the userlist.txt file. The resulting file should look similar to the following: -```bash -echo -n "md5"; md5 -qs "password123zenhub" +```txt +"" "plaintext" +"" "SCRAM-SHA-256..." 
``` -#### Python 3 +If you do not already have a dedicated PgBouncer user yet to use as `my-pgbouncer-username`, please follow the instructions under [How to Create a Dedicated PgBouncer User](#how-to-create-a-dedicated-pgbouncer-user) just below: + +#### How to Create a Dedicated PgBouncer User + +Connect to your Postgres database as an administrative user and run the following commands: + +> Set `my-pgbouncer-username` to the username of the new dedicated user to be created that manages PgBouncer to Postgres communication +> +> Set `my-pgbouncer-password` to the password to be created for the new user `my-pgbouncer-username` -```python -import hashlib -print("md5" + hashlib.md5("password123" + "zenhub").hexdigest()) +```sql +--- Create the new `my-pgbouncer-username` user with a new password +SET password_encryption='scram-sha-256'; CREATE USER WITH PASSWORD ''; + +--- Grant permissions to `my-pgbouncer-username` user +GRANT CREATE ON SCHEMA public TO ; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON tables TO ; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public to ""; +GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ""; ``` -### pgbouncer.ini +### Configuring the pgbouncer.ini file This configuration file holds the settings to be applied to PgBouncer. This includes TLS settings, database connection information, pooling configuration, etc. @@ -54,17 +78,85 @@ Find and replace the following variables with values relevant to your deployment `my-postgres-port` - The port of the database. -`my-postgres-username` - The username of the user for connecting to the Postgres database and internal PgBouncer management database. Replace both occurrences. +`my-pgbouncer-username` - The username of the dedicated user that is used for PgBouncer to Postgres communication. `my-max-client-conn` - The maximum number of client connections that will need to concurrently access the database. Use your monthly active users as a reference. 
-`my-default-pool-size` - The size of the connection pool that will interact directly with Postgres. Set this to the number of available client PostgresSQL connections. Do not occupy all connections, as some connections may need to be left available for database administrator. For example, if your PostgresSQL max_connections is 515, save 15 connections for administration by setting `my-default-pool-size` to `500`. +`my-default-pool-size` - The size of the connection pool that will interact directly with Postgres. Set this to the number of available client PostgreSQL connections. Do not occupy all connections, as some connections may need to be left available for the database administrator. For example, if your PostgreSQL max_connections is 515, save 15 connections for administration by setting `my-default-pool-size` to `500`. + +## How to Upgrade from MD5 to SCRAM-SHA-256 Encryption + +Since Postgres Version 14, the default encryption method for authentication is SCRAM-SHA-256. PgBouncer will fail to communicate to the Postgres server unless the configuration is updated to match this restriction. +This section defines the steps for upgrading from the older, MD5 encryption standard to SCRAM-SHA-256. + +1. First of all, you will need a dedicated PgBouncer user to handle communication from the PgBouncer to Postgres database. If you already have a dedicated user, please skip this step. Otherwise, please follow the instructions in [How to Create a Dedicated PgBouncer User](#how-to-create-a-dedicated-pgbouncer-user). + +2. **Before upgrading** to Postgres Version 14+ (or any Postgres database that defaults to SCRAM-SHA-256), please make sure to re-hash the users that are used to connect to Postgres by following the instructions in [How to Rehash Passwords in SCRAM-SHA-256](#how-to-rehash-passwords-in-scram-sha-256). 
+ +The [pgbouncer] section of pgbouncer.ini has been modified in ZHE version 4.1 to set auth_type to `scram-sha-256` instead of `md5`, so if you are upgrading from a version that used md5, please ensure it is set to `scram-sha-256` in your pgbouncer.ini file. + +### How to Rehash Passwords in SCRAM-SHA-256 + +The below instructions will trigger the re-hashing of passwords in SCRAM-SHA-256 format. You may use the same password for existing users. + +Find and replace the following variables with values relevant to your deployment: + +- `my-pgbouncer-username` - The username of the dedicated user to connect from PgBouncer to Postgres. +- `my-pgbouncer-password` - The password of the user `my-pgbouncer-username`. +- `my-postgres-username` - The username of the user that is used to connect from the Zenhub application to PgBouncer or Postgres. +- `my-postgres-password` - The password of the user `my-postgres-username`. + +Connect to your Postgres database as an administrative user and run the following commands: + +```sql +--- Alter the `my-pgbouncer-username` user hash +SET password_encryption='scram-sha-256'; ALTER USER WITH PASSWORD ''; + +--- Verify the `my-pgbouncer-username` user hash +--- This may not work for some Postgres service providers due to restricted access to pg_shadow table. +SELECT passwd FROM pg_shadow WHERE usename = ''; + +--- Alter the "" user hash +SET password_encryption='scram-sha-256'; ALTER USER postgres WITH PASSWORD ''; + +--- Verify the "" user hash +--- This may not work for some Postgres service providers due to restricted access to pg_shadow table. +SELECT passwd FROM pg_shadow WHERE usename = ''; +``` + +On a successful re-hash, the verification steps above should return a hash that starts with `SCRAM-SHA-256$4096`. If you do not have access to the `pg_shadow`, you will not be able to verify the hash. + +Next, you will need to update the `userlist.txt` and `pgbouncer.ini` files with new configuration and the new passwords.
For the `userlist.txt` file, you will need to update the `my-pgbouncer-username` password to the new plain-text password. For the `pgbouncer.ini` file, you will need to update the user in the `[databases]` section to the new dedicated `my-pgbouncer-username` user and update the `auth_type` to `scram-sha-256`. How to do this is described in the [Configuration for Postgres using scram-sha-256](#configuration-for-postgres-using-scram-sha-256) section. + +Once the `userlist.txt` and `pgbouncer.ini` files are updated, you will need to update the pgbouncer K8S secrets and roll out the changes to the pgbouncer pod. To do this, follow the below steps: + +1. Run the kustomize build process from the main directory to apply the changes to the pgbouncer K8S secrets. + + Check the diff to make sure the changes are as expected: + + ```bash + kustomize build . | kubectl diff -f- + ``` + + If the diff looks good, apply the changes: + + ```bash + kustomize build . | kubectl apply -f- + ``` + +2. Restart the pgbouncer deployment so that it configures pgbouncer using the updated secrets. + + ```bash + kubectl rollout restart deployment pgbouncer -n + ``` + +## Usage -## How to Connect +### How to Connect to the PgBouncer Admin Database Once PgBouncer is deployed, you can connect to the 'pgbouncer' administration database via these steps: -1. Find the ClusterIP of the pgbouncer service and podname for use later: `kubectl -n zenhub get svc,pod -l app.kubernetes.io/name=pgbouncer` -2. Exec into the pgbouncer pod: `kubectl -n zenhub exec -it -- sh` +1. Find the ClusterIP of the PgBouncer service and pod name for use later: `kubectl -n zenhub get svc,pod -l app.kubernetes.io/name=pgbouncer` +2. Exec into the PgBouncer pod: `kubectl -n exec -it -- sh` 3. Run this command: `psql -p 5432 -U --host -d pgbouncer` 4. You're now connected! You can run commands like `show pools;`, or `show databases;`. 
Admin console command reference: https://www.pgbouncer.org/usage.html#admin-console \ No newline at end of file diff --git a/k8s-cluster/options/pgbouncer/pgbouncer.ini b/k8s-cluster/options/pgbouncer/pgbouncer.ini index afabeca..d0a9c09 100644 --- a/k8s-cluster/options/pgbouncer/pgbouncer.ini +++ b/k8s-cluster/options/pgbouncer/pgbouncer.ini @@ -1,6 +1,6 @@ ################## Manually generated ################## [databases] -my-db-name = host=my-postgres-host-name port=my-postgres-port user=my-postgres-username +my-db-name = host=my-postgres-host-name port=my-postgres-port user=my-pgbouncer-username dbname=my-db-name [pgbouncer] listen_addr = 0.0.0.0 @@ -8,7 +8,7 @@ listen_port = 5432 unix_socket_dir = user = postgres auth_file = /etc/pgbouncer/userlist.txt -auth_type = md5 +auth_type = scram-sha-256 pool_mode = transaction ignore_startup_parameters = extra_float_digits # Extras @@ -16,13 +16,16 @@ max_client_conn = my-max-client-conn default_pool_size = my-default-pool-size # Log settings -admin_users = my-postgres-username +admin_users = my-pgbouncer-username # Connection sanity checks, timeouts -# TLS settings +# TLS settings default: requires pgbouncer to connect to PostgreSQL using TLS +# Uncomment below if not using verify-full, as server_tls_sslmode = prefer may cause the application to fail silently +# server_tls_sslmode = require + server_tls_sslmode = verify-full server_tls_ca_file = /var/ca-bundle/postgres/postgres-ca.pem # Dangerous timeouts -################## end file ################## \ No newline at end of file +################## end file ################## diff --git a/k8s-cluster/options/pgbouncer/scram.py b/k8s-cluster/options/pgbouncer/scram.py new file mode 100755 index 0000000..e188755 --- /dev/null +++ b/k8s-cluster/options/pgbouncer/scram.py @@ -0,0 +1,46 @@ +#!/usr/bin/python3 +import hmac +import sys +from base64 import standard_b64encode +from hashlib import pbkdf2_hmac, sha256 +from os import urandom + +salt_size = 16 +digest_len = 
32 +iterations = 4096 + + +def b64enc(b: bytes) -> str: + return standard_b64encode(b).decode("utf8") + + +def pg_scram_sha256(passwd: str) -> str: + salt = urandom(salt_size) + digest_key = pbkdf2_hmac( + "sha256", passwd.encode("utf8"), salt, iterations, digest_len + ) + client_key = hmac.digest(digest_key, "Client Key".encode("utf8"), "sha256") + stored_key = sha256(client_key).digest() + server_key = hmac.digest(digest_key, "Server Key".encode("utf8"), "sha256") + return ( + f"SCRAM-SHA-256${iterations}:{b64enc(salt)}" + f"${b64enc(stored_key)}:{b64enc(server_key)}" + ) + + +def print_usage(): + print("Usage: python scram.py ") + sys.exit(1) + + +def main(): + if len(sys.argv) != 2: + print_usage() + + passwd = sys.argv[1] + + print(pg_scram_sha256(passwd)) + + +if __name__ == "__main__": + main() diff --git a/k8s-cluster/options/pullpolicy/always.yaml b/k8s-cluster/options/pullpolicy/always.yaml index ecb66ca..7e242da 100644 --- a/k8s-cluster/options/pullpolicy/always.yaml +++ b/k8s-cluster/options/pullpolicy/always.yaml @@ -33,18 +33,6 @@ spec: - name: toad-websocket imagePullPolicy: Always ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - template: - spec: - containers: - - name: toad-webhook - imagePullPolicy: Always - --- apiVersion: apps/v1 kind: Deployment diff --git a/k8s-cluster/options/scaling/deployments-resources-small.yaml b/k8s-cluster/options/scaling/deployments-resources-small.yaml index e96e63d..f444101 100644 --- a/k8s-cluster/options/scaling/deployments-resources-small.yaml +++ b/k8s-cluster/options/scaling/deployments-resources-small.yaml @@ -169,25 +169,6 @@ spec: cpu: 200m memory: 500M ---- -# toad-webhook -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - template: - spec: - containers: - - name: toad-webhook - resources: - limits: - cpu: 500m - memory: 2500M - requests: - cpu: 200m - memory: 500M - --- # toad-worker apiVersion: apps/v1 diff --git 
a/k8s-cluster/options/scaling/deployments-scaling.yaml b/k8s-cluster/options/scaling/deployments-scaling.yaml index 26ee1ab..a71a21f 100644 --- a/k8s-cluster/options/scaling/deployments-scaling.yaml +++ b/k8s-cluster/options/scaling/deployments-scaling.yaml @@ -111,15 +111,6 @@ metadata: spec: replicas: 2 ---- -# toad-webhook -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - replicas: 2 - --- # toad-worker apiVersion: apps/v1 diff --git a/k8s-cluster/options/zenhub-registry/zenhub-registry.yaml b/k8s-cluster/options/zenhub-registry/zenhub-registry.yaml index 5609932..61f1665 100644 --- a/k8s-cluster/options/zenhub-registry/zenhub-registry.yaml +++ b/k8s-cluster/options/zenhub-registry/zenhub-registry.yaml @@ -30,17 +30,6 @@ spec: imagePullSecrets: - name: zenhub-docker-registry-credentials ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toad-webhook -spec: - template: - spec: - imagePullSecrets: - - name: zenhub-docker-registry-credentials - --- apiVersion: apps/v1 kind: Deployment diff --git a/k8s-cluster/support-bundle/README.md b/k8s-cluster/support-bundle/README.md index 1f5b763..1616ef2 100644 --- a/k8s-cluster/support-bundle/README.md +++ b/k8s-cluster/support-bundle/README.md @@ -22,7 +22,7 @@ We have prepared a Python script that you can use to send us your support bundle ### Prerequisites - Must have a support bundle already generated that you want to upload -- Python and the boto3 package installed +- Python 3 with the boto3 and PyYaml packages installed - Contact Zenhub Support to request a set of access keys, then set them in the environment ```bash @@ -40,7 +40,7 @@ From this directory, run the `upload_bundle.py` script like so: - Replace `` with the full path to the tar you want to upload ```bash -python upload_bundle.py +python3 upload_bundle.py ``` Then contact Zenhub Enterprise Support with a description of your problem and the name of your support bundle. 
diff --git a/k8s-cluster/support-bundle/requirements.txt b/k8s-cluster/support-bundle/requirements.txt index e304f9a..a5de9d4 100644 --- a/k8s-cluster/support-bundle/requirements.txt +++ b/k8s-cluster/support-bundle/requirements.txt @@ -1 +1,2 @@ -boto3==1.26.115 \ No newline at end of file +boto3==1.26.115 +PyYAML==5.3.1 \ No newline at end of file diff --git a/k8s-cluster/support-bundle/upload_bundle.py b/k8s-cluster/support-bundle/upload_bundle.py index bc7a58a..ddd051c 100644 --- a/k8s-cluster/support-bundle/upload_bundle.py +++ b/k8s-cluster/support-bundle/upload_bundle.py @@ -1,15 +1,23 @@ +import base64 +import json from argparse import ArgumentParser from os import getenv, path from sys import exit from boto3 import client +from yaml import safe_load +access_key_id: str = getenv("SUPPORT_BUNDLE_ACCESS_KEY") +secret_access_key: str = getenv("SUPPORT_BUNDLE_SECRET_KEY") + +# Optional overwrites for development/testing bucket_name: str = getenv( "SUPPORT_BUNDLE_BUCKET_NAME", "zenhub-enterprise-support-bundles" ) +config_path: str = getenv( + "SUPPORT_BUNDLE_CONFIG_FILE", "/opt/zenhub/configuration/configuration.yaml" +) bucket_region: str = getenv("SUPPORT_BUNDLE_BUCKET_REGION", "us-west-2") -access_key_id: str = getenv("SUPPORT_BUNDLE_ACCESS_KEY") -secret_access_key: str = getenv("SUPPORT_BUNDLE_SECRET_KEY") def validate_env_vars() -> None: @@ -48,10 +56,36 @@ def setup_arg_parser() -> ArgumentParser: return parser -def upload_file_to_s3(customer_identifier: str, bundle_path: str) -> None: +def get_company_name(customer_identifier: str) -> str: + """This will attempt to find the company_name in the ZHE license token + If it can't be found, we fall back to customer_identifier (github hostname on vm) + In a ZHE for K8s context, this will always return the customer_identifier""" + + try: + with open(config_path, "r") as yamlfile: + configuration = safe_load(yamlfile) + + license_jwt_token = configuration["zenhub_configuration"][ + "ENTERPRISE_LICENSE_TOKEN" + ] 
+ payload = license_jwt_token.split(".")[1] + decoded_payload = json.loads(base64.b64decode(payload + "==")) + company_name = decoded_payload["company_name"] + + print(f"Using '{company_name}' instead of customer_identifier.") + + return company_name + except Exception as e: + print( + "Could not find company_name in license token, continuing with customer_identifier." + ) + return customer_identifier + + +def upload_file_to_s3(directory_name: str, bundle_path: str) -> None: # The file name is the last part of the path bundle_file_name: str = bundle_path.split("/")[-1] - s3_object_name: str = f"{customer_identifier}/{bundle_file_name}" + s3_object_name: str = f"{directory_name}/{bundle_file_name}" s3 = client( "s3", @@ -59,11 +93,14 @@ def upload_file_to_s3(customer_identifier: str, bundle_path: str) -> None: aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, ) - - s3.upload_file(bundle_path, bucket_name, s3_object_name) - print( - f"Successfully uploaded bundle {s3_object_name}, please notify Zenhub Support and provide a description of the issue you are experiencing." - ) + try: + s3.upload_file(bundle_path, bucket_name, s3_object_name) + print( + f"Successfully uploaded bundle {s3_object_name}, please notify Zenhub Support and provide a description of the issue you are experiencing." 
+ ) + except Exception as e: + print(f"Failed to upload metrics to S3.\nError: {e}") + exit(1) def main() -> None: @@ -77,7 +114,8 @@ def main() -> None: validate_env_vars() validate_bundle_file_path(args.bundle_file_path) - upload_file_to_s3(args.customer_identifier, args.bundle_file_path) + directory_name = get_company_name(args.customer_identifier) + upload_file_to_s3(directory_name, args.bundle_file_path) if __name__ == "__main__": diff --git a/k8s-cluster/template-files/kraken-configmaps.template b/k8s-cluster/template-files/kraken-configmaps.template index 96ca33a..7cd8a02 100644 --- a/k8s-cluster/template-files/kraken-configmaps.template +++ b/k8s-cluster/template-files/kraken-configmaps.template @@ -130,6 +130,7 @@ data: "isDev": false, "isEnterprise": true, "isLicenseGovernanceEnabled": false, + "isNotionIntegrationEnabled": false, "isTrackerEnabled": false, "isUploadFileToLocal": false, "loginURL": "https://%%subdomain_suffix%%.%%domain_tld%%/api/auth/github", diff --git a/k8s-cluster/update/batch_v1_job_data_migration.yaml b/k8s-cluster/update/batch_v1_job_data_migration.yaml index e975071..0ea7aa1 100644 --- a/k8s-cluster/update/batch_v1_job_data_migration.yaml +++ b/k8s-cluster/update/batch_v1_job_data_migration.yaml @@ -15,7 +15,7 @@ spec: - bundle - exec - rake - - zhe:migrate_4_0 + - zhe:migrate_4_1 env: - name: MONGO_IS_DOCUMENTDB valueFrom: @@ -195,7 +195,7 @@ spec: valueFrom: secretKeyRef: key: graphql_redis_url - name: raptor-migrate + name: configuration - name: SECRET_KEY_BASE valueFrom: secretKeyRef: @@ -254,7 +254,7 @@ spec: valueFrom: secretKeyRef: key: cable_redis_url - name: raptor-migrate + name: configuration - name: CABLE_ALLOWED_ORIGINS valueFrom: configMapKeyRef: @@ -295,7 +295,7 @@ spec: secretKeyRef: key: google_oauth2_client_secret name: raptor - image: us.gcr.io/zenhub-public/raptor-backend:zhe-4.0.2 + image: us.gcr.io/zenhub-public/raptor-backend:zhe-4.1.0 imagePullPolicy: Always name: data-migration resources: diff --git 
a/k8s-cluster/update/zhe-upgrade.sh b/k8s-cluster/update/zhe-upgrade.sh index 56d7d63..1c19462 100755 --- a/k8s-cluster/update/zhe-upgrade.sh +++ b/k8s-cluster/update/zhe-upgrade.sh @@ -50,9 +50,9 @@ raptor-sidekiq-worker raptor-sidekiq-worker-default raptor-admin raptor-cable +raptor-webhook kraken-webapp toad-worker -toad-webhook admin-ui toad-cron raptor-api @@ -61,11 +61,13 @@ toad-websocket devsite pgbouncer raptor-api-public +toad-webhook ) HPA=( raptor-api raptor-cable +raptor-webhook toad-api toad-websocket toad-webhook @@ -81,10 +83,11 @@ kraken-webapp nginx-gateway raptor-admin raptor-api +raptor-webhook raptor-cable toad-api -toad-webhook toad-websocket +toad-webhook ) CACHES=( @@ -143,16 +146,16 @@ echo "###############################################" echo " Starting Data Migration Job" echo "###############################################" -echo " Scaling up workers..." -kubectl -n $NAMESPACE scale deployments/raptor-sidekiq-worker --replicas=2 - -kubectl -n $NAMESPACE wait --for=condition=available deployment/raptor-sidekiq-worker --timeout=300s - echo " Scaling up pgbouncer..." kubectl -n $NAMESPACE scale deployments/pgbouncer --replicas=1 kubectl -n $NAMESPACE wait --for=condition=available deployment/pgbouncer --timeout=300s +echo " Scaling up workers..." +kubectl -n $NAMESPACE scale deployments/raptor-sidekiq-worker --replicas=2 + +kubectl -n $NAMESPACE wait --for=condition=available deployment/raptor-sidekiq-worker --timeout=300s + echo " Updating data..." 
kubectl -n $NAMESPACE apply -f batch_v1_job_data_migration.yaml @@ -176,7 +179,7 @@ do done echo " Deleting Gateway" -kubectl -n $NAMESPACE delete deployment/nginx-gateway +kubectl -n $NAMESPACE delete deployment/nginx-gateway --ignore-not-found echo " Deleting Services" for s in "${SERVICES[@]}" diff --git a/virtual-machine/README.md b/virtual-machine/README.md index 1738039..4feeb21 100644 --- a/virtual-machine/README.md +++ b/virtual-machine/README.md @@ -35,9 +35,9 @@ - [4.3 Application Check](#43-application-check) - [4.4 Publish the Chrome and Firefox Extensions](#44-publish-the-chrome-and-firefox-extensions) - [5. Upgrades](#5-upgrades) - - [5.1 Application Updates](#51-application-updates) - - [5.1.1 Update](#511-update) - - [5.1.2 Rollback](#512-rollback) + - [5.1 Prerequisites](#51-prerequisites) + - [5.2 Preparing to Upgrade](#52-preparing-to-upgrade) + - [5.3 Upgrading](#53-upgrading) - [6. Maintenance and Operational Tasks](#6-maintenance-and-operational-tasks) - [6.1 Tasks in the Admin UI](#61-tasks-in-the-admin-ui) - [6.1.1 Publishing the Chrome and Firefox Extensions](#611-publishing-the-chrome-and-firefox-extensions) @@ -79,6 +79,9 @@ - [10.4 Azure Active Directory](#104-azure-active-directory) - [10.5 LDAP](#105-ldap) - [10.6 SAML](#106-saml) +- [11. Integrations](#11-integrations) + - [11.1 Notion](#111-notion) + ## 1. Getting Started This README will be your guide to setting up Zenhub as a virtual machine. If you currently run a Kubernetes cluster and would prefer to set Zenhub up there, please go back to the [**k8s-cluster**](https://github.com/ZenhubHQ/zenhub-enterprise/tree/master/k8s-cluster) folder. If this is your first time using Zenhub On-Premise, please get in touch with us at https://www.zenhub.com/enterprise and join us in our [Community](https://help.zenhub.com/support/solutions/articles/43000556746-zenhub-users-slack-community) so that we can provide you with additional support. 
@@ -249,6 +252,10 @@ zenhub_configuration: # GRAPHQL_RUNTIME_LIMIT: # REST_API_REQUEST_LIMIT: # REST_API_TIME_LIMIT: +## (Optional) Configure Notion Integration + # NOTION_ENABLED: + # NOTION_CLIENT_ID: + # NOTION_CLIENT_SECRET: ## (Optional) Configure built-in email/password authentication # AUTHV2_EMAIL_PW_ENABLED: true ## (Optional) Configure W3ID as an authentication provider @@ -372,6 +379,12 @@ Zenhub Enterprise 4.0 and greater has an optional built-in email/password authen - `AUTHV2_SAML_IDP_METADATA_URL`: Metadata URL linking to the identity provider's SAML config - `AUTHV2_SAML_SP_ENTITY_ID`: Entity ID of the service provider +##### Notion Integration + +- `NOTION_ENABLED`: Enables the Notion integration +- `NOTION_CLIENT_ID`: The OAuth client ID from notion.so/my-integrations +- `NOTION_CLIENT_SECRET`: The OAuth client secret from notion.so/my-integrations + #### VM Configuration These should be in their own sections, not nested under zenhub_configuration. Commented examples are present in the file example above. @@ -450,51 +463,55 @@ See section [6.1.1](#611-publishing-the-chrome-and-firefox-extensions) for instr ## 5. Upgrades -Upgrading Zenhub is important for both stability and security. We suggest updating Zenhub _at least_ once every 12 months to avoid issues related to outdated software. +Zenhub Enterprise Server is continuously evolving, bringing in new features and resolving bugs through feature updates and patch releases. You are responsible for upgrading your instance. -### 5.1 Application Updates +We highly suggest updating your instance _at least_ once every 12 months to avoid issues related to outdated software. -#### 5.1.1 Update +### 5.1 Prerequisites -Update Docker images and Kubernetes manifests for the Zenhub application. +To successfully upgrade, ensure you have sufficient disk space available on your VM. 
Your disk should be at least 20% free, and you want to ensure you do not reach 90% utilization after the upgrade as this can cause adverse effects on the stability of Zenhub Enterprise. You can check the [Disk Management](#67-disk-management) documentation to see how to check and increase disk space if needed. -> ⚠️ **NOTE:** It is strongly encouraged to take a machine-level snapshot before updating ZHE +### 5.2 Preparing to Upgrade -> Before updating, perform a data backup `zhe-config --backup` +1. Before upgrading, you should always check the release notes for the version you are upgrading to. You can find the release notes for each version in the [releases](https://github.com/ZenHubHQ/zenhub-enterprise/releases) section of this repository. You must ensure you are upgrading to a version that is compatible with your GitHub Enterprise Server version. The release notes list the GitHub Enterprise Server versions that are compatible with each Zenhub Enterprise release. -1. Download the latest Zenhub application update bundle from the link provided in the release email (or [contact our team](mailto:enterprise@zenhub.com)): +2. To obtain an upgrade bundle, [contact our team](mailto:enterprise@zenhub.com) to get the link. You can download the file to your instance directly using `curl`: ```bash curl -o zhe_upgrade.run "" ``` +If you cannot download the file directly to your instance, you can download it to your local machine and then upload it to your instance using `scp` or your choice of tool. > ⚠️ **NOTE:** The file integrity is checked automatically via an integrated **checksum** when the upgrade is run. If you want to check the file integrity without running the upgrade, run `bash zhe_upgrade.run --check` -2. If not already directly downloaded to the VM, move the bundle into the VM (use `scp` or your choice of tool). +3. Finally, **we highly recommend taking a VM or disk snapshot of your VM**. 
This will allow you to roll back to the previous version if you encounter any issues during or after the upgrade. To ensure data integrity, we recommend taking a snapshot of your VM while it is powered off, or while the Zenhub application is in maintenance mode to ensure background jobs are not running. + +> ⚠️ **NOTE:** If your hypervisor does not support VM snapshots, you can take a snapshot of the disk instead. This will allow you to restore the disk to the previous state if needed. + +### 5.3 Upgrading + +Once you are ready to upgrade, follow the steps below. These instructions include the use of the `tmux` terminal multiplexer to ensure the upgrade is not interrupted by a network disconnect. -3. Run the upgrade script: +1. Create a new tmux session named "upgrade" and run the upgrade bundle while inside the tmux session: ```bash -bash zhe_upgrade.run +tmux new-session -s "upgrade" "bash zhe_upgrade.run" ``` -> ⚠️ **NOTE:** To protect the upgrade from SSH disconnects, you may want to run the upgrade inside `tmux` or `screen`, e.g. `tmux new-session -s "upgrade" "bash zhe_upgrade.run"` -4. Wait for Zenhub to update and then confirm that it has updated successfully by checking the version number on the root page of the application. If you observe any problems with Zenhub after the update, you can follow the [Rollback](#512-rollback) steps below. Otherwise, proceed to the next step. +If you become disconnected from the VM during the upgrade, you can reconnect to the tmux session by running: `tmux attach-session -t upgrade` -5. Publish an update to the Chrome and Firefox extensions. See section [6.1.1](#611-publishing-the-chrome-and-firefox-extensions) for more information. +You can manually detach from the tmux session by pressing: `Ctrl+b` and then `d`. The upgrade will continue to run in the background. 
-#### 5.1.2 Rollback +You can list all tmux sessions by running: `tmux ls` -If you have any problems with Zenhub after installing an update, you can quickly roll back to your most recent application version using the automated application backup taken at the start of your upgrade or to a machine-level snapshot if one was taken. +2. Once the upgrade is complete, be sure you are connected to the "upgrade" tmux session and then end the tmux session by running: `exit` -> ⚠️ **NOTE:** If you have already published the extensions after updating, rolling back the application may break your extensions. - -1. Locate your desired backup found within `/opt/zenhub/upgrade_backup/` -2. Run the following command from the same directory as your latest upgrade bundle: +> ⚠️ **NOTE:** If you observe any problems during or after the upgrade, please record the problem and [contact our team](mailto:enterprise@zenhub.com) with the details, including your upgrade log which can be found as `/var/log/zenhub/zhe-upgrade-`. Then, feel free to revert your instance to the previous version using the VM or disk snapshot you took before the upgrade. +> +> If you are unable to use a VM or disk snapshot to revert, there is a data backup created during the start of the upgrade process that you can use to restore your instance to the previous version. You can find the backup in `/opt/snapshots` timestamped with the date the upgrade was run. In this situation, please [contact our team](mailto:enterprise@zenhub.com) for assistance. -```bash -bash zhe_upgrade.run rollback /opt/zenhub/upgrade_backup/.tar.gz -``` +3. Publish an update to the Chrome and Firefox extensions. See section [6.1.1](#611-publishing-the-chrome-and-firefox-extensions) for more information. ## 6. Maintenance and Operational Tasks @@ -1009,7 +1026,7 @@ If you wish to remove your log aggregator setup and revert to our default out-of 1. 
Undo the changes made in section 6.1.3 - Set fluentdconf to be `fluentd.conf` - - Run `kustomize edit set image fluentd=us.gcr.io/zenhub-public/fluentd:zhe-4.0.2` + - Run `kustomize edit set image fluentd=us.gcr.io/zenhub-public/fluentd:zhe-4.1.0` 2. Perform the steps in section 6.1.4 ## 9. Developer Site @@ -1078,3 +1095,9 @@ Of the authentication methods listed below, the only one that is enabled by defa Service Provider Attribute Name mappings for the following attributes: - **Email**: `email` - **Name**: `name` + +## 11. Integrations + +### 11.1 Notion + +Zenhub Enterprise Server can be integrated with Notion to allow users to preview Notion links within Zenhub Issues. This integration is disabled by default and can be enabled by setting the [Notion Integration](#notion-integration) optional configuration. \ No newline at end of file