From 3ba5211201428dff247e7dfa64c6b10658b03dce Mon Sep 17 00:00:00 2001 From: Max Bridges Date: Fri, 7 Nov 2025 17:55:10 -0500 Subject: [PATCH] remove unused assemblies --- architecture/argocd.adoc | 25 -- .../installing/data-mover-intro.adoc | 29 -- ...sing-data-mover-for-csi-snapshots-doc.adoc | 276 ------------------ getting_started/accessing-your-services.adoc | 30 -- getting_started/dedicated-networking.adoc | 10 - getting_started/deleting-your-cluster.adoc | 16 - getting_started/scaling-your-cluster.adoc | 9 - .../install_config/configuring-custom-ca.adoc | 12 - ...manually-creating-iam-azure-stack-hub.adoc | 33 --- ...stalling-openstack-installer-ovs-dpdk.adoc | 87 ------ .../installing-openstack-installer.adoc | 51 ---- .../installing-openstack-load-balancing.adoc | 9 - .../installing-openstack-troubleshooting.adoc | 53 ---- ...nstalling-openstack-user-sr-iov-kuryr.adoc | 88 ------ .../installing-openstack-user-sr-iov.adoc | 103 ------- ...ng-restricted-networks-vmc-user-infra.adoc | 156 ---------- ...vmc-network-customizations-user-infra.adoc | 139 --------- .../installing-vmc-user-infra.adoc | 146 --------- .../cluster-api-disabling.adoc | 13 - .../cluster-api-resiliency.adoc | 13 - .../metering-about-configuring.adoc | 20 -- .../metering-common-config-options.adoc | 175 ----------- ...ing-configure-aws-billing-correlation.adoc | 116 -------- .../metering-configure-hive-metastore.adoc | 18 -- ...metering-configure-persistent-storage.adoc | 22 -- ...metering-configure-reporting-operator.adoc | 16 - metering/metering-about-metering.adoc | 12 - metering/metering-installing-metering.adoc | 62 ---- .../metering-troubleshooting-debugging.adoc | 21 -- metering/metering-uninstall.adoc | 31 -- metering/metering-usage-examples.adoc | 22 -- metering/metering-using-metering.adoc | 19 -- metering/reports/metering-about-reports.adoc | 16 - .../reports/metering-storage-locations.adoc | 83 ------ ...-nmstate-observing-node-network-state.adoc | 14 - .../configuring-gateway.adoc | 14 - ...ng-cookies-to-keep-route-statefulness.adoc | 28 -- .../deploy-heterogeneous-configuration.adoc | 30 -- .../certificate-types-descriptions-index.adoc | 13 - .../rosa-persistent-storage-aws-efs-csi.adoc | 84 ------ support/osd-managed-resources.adoc | 54 ---- .../recovering-update-before-applied.adoc | 11 - .../restoring-cluster-previous-state.adoc | 9 - .../updating-restricted-network-cluster.adoc | 216 -------------- welcome/about-hcp.adoc | 100 ------- 45 files changed, 2504 deletions(-) delete mode 100644 architecture/argocd.adoc delete mode 100644 backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc delete mode 100644 backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc delete mode 100644 getting_started/accessing-your-services.adoc delete mode 100644 getting_started/dedicated-networking.adoc delete mode 100644 getting_started/deleting-your-cluster.adoc delete mode 100644 getting_started/scaling-your-cluster.adoc delete mode 100644 installing/install_config/configuring-custom-ca.adoc delete mode 100644 installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc delete mode 100644 installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc delete mode 100644 installing/installing_openstack/installing-openstack-installer.adoc delete mode 100644 installing/installing_openstack/installing-openstack-load-balancing.adoc delete mode 100644 
installing/installing_openstack/installing-openstack-troubleshooting.adoc delete mode 100644 installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc delete mode 100644 installing/installing_openstack/installing-openstack-user-sr-iov.adoc delete mode 100644 installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc delete mode 100644 installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc delete mode 100644 installing/installing_vmc/installing-vmc-user-infra.adoc delete mode 100644 machine_management/cluster_api_machine_management/cluster-api-disabling.adoc delete mode 100644 machine_management/cluster_api_machine_management/cluster-api-resiliency.adoc delete mode 100644 metering/configuring_metering/metering-about-configuring.adoc delete mode 100644 metering/configuring_metering/metering-common-config-options.adoc delete mode 100644 metering/configuring_metering/metering-configure-aws-billing-correlation.adoc delete mode 100644 metering/configuring_metering/metering-configure-hive-metastore.adoc delete mode 100644 metering/configuring_metering/metering-configure-persistent-storage.adoc delete mode 100644 metering/configuring_metering/metering-configure-reporting-operator.adoc delete mode 100644 metering/metering-about-metering.adoc delete mode 100644 metering/metering-installing-metering.adoc delete mode 100644 metering/metering-troubleshooting-debugging.adoc delete mode 100644 metering/metering-uninstall.adoc delete mode 100644 metering/metering-usage-examples.adoc delete mode 100644 metering/metering-using-metering.adoc delete mode 100644 metering/reports/metering-about-reports.adoc delete mode 100644 metering/reports/metering-storage-locations.adoc delete mode 100644 networking/k8s_nmstate/k8s-nmstate-observing-node-network-state.adoc delete mode 100644 networking/ovn_kubernetes_network_provider/configuring-gateway.adoc delete mode 100644 networking/using-cookies-to-keep-route-statefulness.adoc delete mode 100644 post_installation_configuration/deploy-heterogeneous-configuration.adoc delete mode 100644 security/certificate_types_descriptions/certificate-types-descriptions-index.adoc delete mode 100644 storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc delete mode 100644 support/osd-managed-resources.adoc delete mode 100644 updating/troubleshooting_updates/recovering-update-before-applied.adoc delete mode 100644 updating/troubleshooting_updates/restoring-cluster-previous-state.adoc delete mode 100644 updating/updating-restricted-network-cluster.adoc delete mode 100644 welcome/about-hcp.adoc diff --git a/architecture/argocd.adoc b/architecture/argocd.adoc deleted file mode 100644 index ede48546c22b..000000000000 --- a/architecture/argocd.adoc +++ /dev/null @@ -1,25 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="argocd"] -= Using ArgoCD with {product-title} -include::_attributes/common-attributes.adoc[] - -:context: argocd - -toc::[] - -[id="argocd-what"] -== What does ArgoCD do? - -ArgoCD is a declarative continuous delivery tool that leverages GitOps to maintain cluster resources. ArgoCD is implemented as a controller that continuously monitors application definitions and configurations defined in a Git repository and compares the specified state of those configurations with their live state on the cluster. Configurations that deviate from their specified state in the Git repository are classified as OutOfSync. 
ArgoCD reports these differences and allows administrators to automatically or manually resync configurations to the defined state. - -ArgoCD enables you to deliver global custom resources, like the resources that are used to configure {product-title} clusters. - -[id="argocd-support"] -== Statement of support - -Red Hat does not provide support for this tool. To obtain support for ArgoCD, see link:https://argoproj.github.io/argo-cd/SUPPORT/[Support] in the ArgoCD documentation. - -[id="argocd-documentation"] -== ArgoCD documentation - -For more information about using ArgoCD, see the link:https://argoproj.github.io/argo-cd/[ArgoCD documentation]. diff --git a/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc b/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc deleted file mode 100644 index 38a88f9dc4b7..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/data-mover-intro.adoc +++ /dev/null @@ -1,29 +0,0 @@ -:_mod-docs-content-type: CONCEPT -[id="oadp-data-mover-intro"] -= OADP Data Mover Introduction -include::_attributes/common-attributes.adoc[] -:context: data-mover - -toc::[] - -OADP Data Mover allows you to restore stateful applications from the store if a failure, accidental deletion, or corruption of the cluster occurs. - -:FeatureName: The OADP 1.2 Data Mover -include::snippets/technology-preview.adoc[leveloffset=+1] - -* You can use OADP Data Mover to back up Container Storage Interface (CSI) volume snapshots to a remote object store. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using Data Mover for CSI snapshots]. - -* You can use OADP 1.2 Data Mover to back up and restore application data for clusters that use CephFS, CephRBD, or both. See xref:../../../backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc#oadp-using-data-mover-for-csi-snapshots-doc[Using OADP 1.2 Data Mover with Ceph storage]. - -include::snippets/snip-post-mig-hook[] - -[id="oadp-data-mover-prerequisites"] -== OADP Data Mover prerequisites - -* You have a stateful application running in a separate namespace. - -* You have installed the OADP Operator by using Operator Lifecycle Manager (OLM). - -* You have created an appropriate `VolumeSnapshotClass` and `StorageClass`. - -* You have installed the VolSync operator using OLM. diff --git a/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc b/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc deleted file mode 100644 index 86966993a943..000000000000 --- a/backup_and_restore/application_backup_and_restore/installing/oadp-using-data-mover-for-csi-snapshots-doc.adoc +++ /dev/null @@ -1,276 +0,0 @@ -// Module included in the following assemblies: -// -// * backup_and_restore/application_backup_and_restore/backing_up_and_restoring/backing-up-applications.adoc - -:_mod-docs-content-type: PROCEDURE -[id="oadp-using-data-mover-for-csi-snapshots-doc"] -= Using Data Mover for CSI snapshots -include::_attributes/common-attributes.adoc[] -:context: backing-up-applications - -toc::[] - -:FeatureName: Data Mover for CSI snapshots - -The OADP Data Mover enables customers to back up Container Storage Interface (CSI) volume snapshots to a remote object store. 
When Data Mover is enabled, you can restore stateful applications by using CSI volume snapshots pulled from the object store if a failure, accidental deletion, or corruption of the cluster occurs. - -The Data Mover solution uses the Restic option of VolSync. - -Data Mover supports backup and restore of CSI volume snapshots only. - -In OADP 1.2 Data Mover, `VolumeSnapshotBackups` (VSBs) and `VolumeSnapshotRestores` (VSRs) are queued by using the VolumeSnapshotMover (VSM). You can improve the performance of the VSM by specifying the number of VSBs and VSRs that can be `InProgress` simultaneously. After all async plugin operations are complete, the backup is marked as complete. - - -:FeatureName: The OADP 1.2 Data Mover -include::snippets/technology-preview.adoc[leveloffset=+1] - -[NOTE] -==== -Red Hat recommends that customers who use OADP 1.2 Data Mover to back up and restore ODF CephFS volumes upgrade to or install {product-title} version 4.12 or later for improved performance. OADP Data Mover can leverage CephFS shallow volumes in {product-title} version 4.12 or later, which, based on our testing, can improve backup times. - -* https://issues.redhat.com/browse/RHSTOR-4287[CephFS ROX details] -//* https://github.com/ceph/ceph-csi/blob/devel/docs/cephfs-snapshot-backed-volumes.md[Provisioning and mounting CephFS snapshot-backed volumes] - - -//For more information about OADP 1.2 with CephS [name of topic], see ___. - -==== - -.Prerequisites - -* You have verified that the `StorageClass` and `VolumeSnapshotClass` custom resources (CRs) support CSI. - -* You have verified that only one `VolumeSnapshotClass` CR has the annotation `snapshot.storage.kubernetes.io/is-default-class: "true"`. -+ -[NOTE] -==== -In {product-title} version 4.12 or later, verify that this is the only default `VolumeSnapshotClass`. -==== - -* You have verified that the `deletionPolicy` of the `VolumeSnapshotClass` CR is set to `Retain`. - -* You have verified that only one `StorageClass` CR has the annotation `storageclass.kubernetes.io/is-default-class: "true"`. - -* You have included the label `{velero-domain}/csi-volumesnapshot-class: "true"` in your `VolumeSnapshotClass` CR. - -* You have annotated the OADP namespace with `volsync.backube/privileged-movers="true"`, for example by entering the `oc annotate --overwrite namespace/openshift-adp volsync.backube/privileged-movers="true"` command. -+ -[NOTE] -==== -In OADP 1.2, the `privileged-movers` setting is not required in most scenarios. The restoring container permissions should be adequate for the VolSync copy. In some user scenarios, there might be permission errors that setting `privileged-movers` to `"true"` resolves. -==== - -* You have installed the VolSync Operator by using the Operator Lifecycle Manager (OLM). -+ -[NOTE] -==== -The VolSync Operator is required for using OADP Data Mover. -==== - -* You have installed the OADP Operator by using OLM. -+ --- -include::snippets/xfs-filesystem-snippet.adoc[] --- - -.Procedure - -. Configure a Restic secret by creating a `.yaml` file as in the following example: -+ -[source,yaml] ----- -apiVersion: v1 -kind: Secret -metadata: - name: <secret_name> - namespace: openshift-adp -type: Opaque -stringData: - RESTIC_PASSWORD: <password> ----- -+ -[NOTE] -==== -By default, the Operator looks for a secret named `dm-credential`. If you are using a different name, you must specify the name through a Data Protection Application (DPA) CR by using `dpa.spec.features.dataMover.credentialName`. -==== - -. Create a DPA CR similar to the following example. The default plugins include CSI. 
-+ -.Example Data Protection Application (DPA) CR -[source,yaml] ----- -apiVersion: oadp.openshift.io/v1alpha1 -kind: DataProtectionApplication -metadata: - name: velero-sample - namespace: openshift-adp -spec: - backupLocations: - - velero: - config: - profile: default - region: us-east-1 - credential: - key: cloud - name: cloud-credentials - default: true - objectStorage: - bucket: <bucket_name> - prefix: <bucket_prefix> - provider: aws - configuration: - restic: - enable: <true_or_false> - velero: - itemOperationSyncFrequency: "10s" - defaultPlugins: - - openshift - - aws - - csi - - vsm - features: - dataMover: - credentialName: restic-secret - enable: true - maxConcurrentBackupVolumes: "3" <1> - maxConcurrentRestoreVolumes: "3" <2> - pruneInterval: "14" <3> - volumeOptions: <4> - sourceVolumeOptions: - accessMode: ReadOnlyMany - cacheAccessMode: ReadWriteOnce - cacheCapacity: 2Gi - destinationVolumeOptions: - storageClass: other-storageclass-name - cacheAccessMode: ReadWriteMany - snapshotLocations: - - velero: - config: - profile: default - region: us-west-2 - provider: aws - ----- -<1> Optional: Specify the upper limit of the number of snapshots allowed to be queued for backup. The default value is `10`. -<2> Optional: Specify the upper limit of the number of snapshots allowed to be queued for restore. The default value is `10`. -<3> Optional: Specify the number of days between runs of Restic pruning on the repository. The prune operation repacks the data to free space, but it can also generate significant I/O traffic as part of the process. Setting this option allows a trade-off between storage consumption, from data that is no longer referenced, and access costs. -<4> Optional: Specify VolSync volume options for backup and restore. - -+ -The OADP Operator installs two custom resource definitions (CRDs), `VolumeSnapshotBackup` and `VolumeSnapshotRestore`. -+ -.Example `VolumeSnapshotBackup` CRD -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotBackup -metadata: - name: <vsb_name> - namespace: <namespace> <1> -spec: - volumeSnapshotContent: - name: <snapcontent_name> - protectedNamespace: <protected_ns> <2> - resticSecretRef: - name: <restic_secret_name> ----- -<1> Specify the namespace where the volume snapshot exists. -<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`. -+ -.Example `VolumeSnapshotRestore` CRD -[source,yaml] ----- -apiVersion: datamover.oadp.openshift.io/v1alpha1 -kind: VolumeSnapshotRestore -metadata: - name: <vsr_name> - namespace: <namespace> <1> -spec: - protectedNamespace: <protected_ns> <2> - resticSecretRef: - name: <restic_secret_name> - volumeSnapshotMoverBackupRef: - sourcePVCData: - name: <source_pvc_name> - size: <source_pvc_size> - resticrepository: <restic_repository> - volumeSnapshotClassName: <vsclass_name> ----- -<1> Specify the namespace where the volume snapshot exists. -<2> Specify the namespace where the OADP Operator is installed. The default is `openshift-adp`. - -. You can back up a volume snapshot by performing the following steps: - -.. Create a backup CR: -+ -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Backup -metadata: - name: <backup_name> - namespace: <protected_ns> <1> -spec: - includedNamespaces: - - <app_ns> <2> - storageLocation: velero-sample-1 ----- -<1> Specify the namespace where the Operator is installed. The default namespace is `openshift-adp`. -<2> Specify the application namespace or namespaces to be backed up. - -.. 
Wait up to 10 minutes and check whether the `VolumeSnapshotBackup` CR status is `Completed` by entering the following commands: -+ -[source,terminal] ----- -$ oc get vsb -n <app_ns> ----- -+ -[source,terminal] ----- -$ oc get vsb <vsb_name> -n <app_ns> -o jsonpath="{.status.phase}" ----- -+ -A snapshot is created in the object store that was configured in the DPA. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotBackup` CR becomes `Failed`, refer to the Velero logs for troubleshooting. -==== - -. You can restore a volume snapshot by performing the following steps: - -.. Delete the application namespace and the `VolumeSnapshotContent` that was created by the Velero CSI plugin. - -.. Create a `Restore` CR and set `restorePVs` to `true`. -+ -.Example `Restore` CR -[source,yaml] ----- -apiVersion: velero.io/v1 -kind: Restore -metadata: - name: <restore_name> - namespace: <protected_ns> -spec: - backupName: <previous_backup_name> - restorePVs: true ----- - -.. Wait up to 10 minutes and check whether the `VolumeSnapshotRestore` CR status is `Completed` by entering the following commands: -+ -[source,terminal] ----- -$ oc get vsr -n <app_ns> ----- -+ -[source,terminal] ----- -$ oc get vsr <vsr_name> -n <app_ns> -o jsonpath="{.status.phase}" ----- - -.. Check whether your application data and resources have been restored. -+ -[NOTE] -==== -If the status of the `VolumeSnapshotRestore` CR becomes `Failed`, refer to the Velero logs for troubleshooting. -==== diff --git a/getting_started/accessing-your-services.adoc b/getting_started/accessing-your-services.adoc deleted file mode 100644 index b3eaef5daff2..000000000000 --- a/getting_started/accessing-your-services.adoc +++ /dev/null @@ -1,30 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="accessing-your-services"] -= Accessing your services -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -Once you have an {product-title} subscription, you can access your services. - -include::modules/dedicated-creating-your-cluster.adoc[leveloffset=+1] - -include::modules/dedicated-accessing-your-cluster.adoc[leveloffset=+1] - -//// - -== Receiving status updates - -Access the status portal at link:https://status-dedicated.openshift.com[]. You -can also subscribe to notifications via email, SMS, or RSS by changing your -preferences in the status portal. - -//// - -== Requesting support - -If you have questions about your environment or must open a support ticket, -you can open or view a support case in the -link:https://access.redhat.com/support/cases/#/case/list[Red Hat Customer -Portal]. diff --git a/getting_started/dedicated-networking.adoc b/getting_started/dedicated-networking.adoc deleted file mode 100644 index 11e2c788ec20..000000000000 --- a/getting_started/dedicated-networking.adoc +++ /dev/null @@ -1,10 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="dedicated-networking"] -= Networking -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-configuring-your-application-routes.adoc[leveloffset=+1] -include::modules/dedicated-exposing-TCP-services.adoc[leveloffset=+1] diff --git a/getting_started/deleting-your-cluster.adoc b/getting_started/deleting-your-cluster.adoc deleted file mode 100644 index a9de4d412acf..000000000000 --- a/getting_started/deleting-your-cluster.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="deleting-your-cluster"] -= Deleting your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -To delete your {product-title} cluster: - -. 
From link:https://console.redhat.com/openshift[console.redhat.com/openshift], click - on the cluster you want to delete. - -. Click the *Actions* button, then *Delete Cluster*. - -. Type the name of the cluster highlighted in bold, then click *Delete*. - -Cluster deletion occurs automatically. diff --git a/getting_started/scaling-your-cluster.adoc b/getting_started/scaling-your-cluster.adoc deleted file mode 100644 index 0075df9bbc5f..000000000000 --- a/getting_started/scaling-your-cluster.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="scaling-your-cluster"] -= Scaling your cluster -include::_attributes/common-attributes.adoc[] -:context: access - -toc::[] - -include::modules/dedicated-scaling-your-cluster.adoc[leveloffset=+1] diff --git a/installing/install_config/configuring-custom-ca.adoc b/installing/install_config/configuring-custom-ca.adoc deleted file mode 100644 index 3824929858ab..000000000000 --- a/installing/install_config/configuring-custom-ca.adoc +++ /dev/null @@ -1,12 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="configuring-custom-ca"] -= Configuring a custom certificate authority -include::_attributes/common-attributes.adoc[] -:context: configuring-custom-ca - -toc::[] - -If you install {product-title} with a proxy or in a restricted network, -you might need to configure a custom certificate authority (CA). - -//include::modules/configuring-firewall.adoc[leveloffset=+1] diff --git a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc b/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc deleted file mode 100644 index 71e0e9d4ca3a..000000000000 --- a/installing/installing_azure_stack_hub/manually-creating-iam-azure-stack-hub.adoc +++ /dev/null @@ -1,33 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="manually-creating-iam-azure-stack-hub"] -= Manually creating IAM for Azure Stack Hub -include::_attributes/common-attributes.adoc[] -:context: manually-creating-iam-azure-stack-hub - -toc::[] - -In environments where the cloud identity and access management (IAM) APIs are not reachable, you must put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. - -//// -In environments where the cloud identity and access management (IAM) APIs are not reachable, or the administrator prefers not to store an administrator-level credential secret in the cluster `kube-system` namespace, you can put the Cloud Credential Operator (CCO) into manual mode before you install the cluster. -//// -// Until ASH supports other credential scenarios besides manual mode, the tone for this article will be manual mode use only. - -include::modules/alternatives-to-storing-admin-secrets-in-kube-system.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* For a detailed description of all available CCO credential modes and their supported platforms, see xref:../../authentication/managing_cloud_provider_credentials/about-cloud-credential-operator.adoc#about-cloud-credential-operator[About the Cloud Credential Operator]. - -include::modules/manually-create-identity-access-management.adoc[leveloffset=+1] - -// I was going to update this but I think the assembly is no longer used and will ask install team if I can get rid of it entirely. 
-include::modules/manually-maintained-credentials-upgrade.adoc[leveloffset=+1] - -[id="next-steps_manually-creating-iam-azure-stack-hub"] -== Next steps - -* Install an {product-title} cluster: -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-default.adoc#installing-azure-stack-hub-default[Installing a cluster quickly on Azure Stack Hub]. -** xref:../../installing/installing_azure_stack_hub/installing-azure-stack-hub-user-infra.adoc#installing-azure-stack-hub-user-infra[Installing a cluster on Azure Stack Hub using ARM templates]. diff --git a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc b/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc deleted file mode 100644 index 70626188b8e7..000000000000 --- a/installing/installing_openstack/installing-openstack-installer-ovs-dpdk.adoc +++ /dev/null @@ -1,87 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-installer-ovs-dpdk"] -= Installing a cluster on OpenStack that supports OVS-DPDK-connected compute machines -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer-ovs-dpdk - -toc::[] - -If your {rh-openstack-first} deployment has Open vSwitch with the Data Plane Development Kit (OVS-DPDK) enabled, you can install an {product-title} cluster on it. Clusters that run on such {rh-openstack} deployments use OVS-DPDK features by providing access to link:https://doc.dpdk.org/guides/prog_guide/poll_mode_drv.html[poll mode drivers]. - -== Prerequisites - -* Review details about the -xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] -processes. -** Verify that {product-title} {product-version} is compatible with your {rh-openstack} version by using the "Supported platforms for OpenShift clusters" section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. - -* Have a storage service installed in {rh-openstack}, like block storage (Cinder) or object storage (Swift). Object storage is the recommended storage technology for {product-registry} cluster deployment. For more information, see xref:../../scalability_and_performance/optimization/optimizing-storage.adoc#optimizing-storage[Optimizing storage]. - -* Have the metadata service enabled in {rh-openstack}. - -* Plan your {rh-openstack} OVS-DPDK deployment by referring to link:https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html-single/network_functions_virtualization_planning_and_configuration_guide/index#plan-ovs-dpdk-deploy_rhosp-nfv[Planning your OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. - -* Configure your {rh-openstack} OVS-DPDK deployment according to link:https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html-single/network_functions_virtualization_planning_and_configuration_guide/index#configure-dpdk-deploy_rhosp-nfv[Configuring an OVS-DPDK deployment] in the Network Functions Virtualization Planning and Configuration Guide. - -** You must complete link:https://docs.redhat.com/en/documentation/red_hat_openstack_platform/16.2/html-single/network_functions_virtualization_planning_and_configuration_guide/index#create-flavor-deploy-instance-ovsdpdk_cfgdpdk-nfv[Creating a flavor and deploying an instance for OVS-DPDK] before you install a cluster on {rh-openstack}. 
- -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_openstack/installation-config-parameters-openstack.adoc#installation-config-parameters-openstack[Installation configuration parameters for OpenStack] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -The cluster is operational. Before you can add OVS-DPDK compute machines though, you must perform additional tasks. - -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-binding-vfio-pci.adoc[leveloffset=+1] -include::modules/installation-osp-dpdk-exposing-host-interface.adoc[leveloffset=+1] - -.Additional resources - -* xref:../../networking/multiple_networks/configuring-additional-network.adoc#nw-multus-host-device-object_configuring-additional-network[Creating an additional network attachment with the Cluster Network Operator] - -The cluster is installed and prepared for configuration. You must now perform the OVS-DPDK configuration tasks in <>. 
- -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-installer-ovs-dpdk"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-openstack-installer-ovs-dpdk"] -== Next steps - -* To complete OVS-DPDK configuration for your cluster, xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure huge pages support] -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting] -* xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[Configure ingress cluster traffic by using a node port] -* xref:../../installing/installing_openstack/installing-openstack-network-config.adoc#installation-osp-configuring-api-floating-ip_installing-openstack-network-config[Configure {rh-openstack} access with floating IP addresses] diff --git a/installing/installing_openstack/installing-openstack-installer.adoc b/installing/installing_openstack/installing-openstack-installer.adoc deleted file mode 100644 index bd2472c36d93..000000000000 --- a/installing/installing_openstack/installing-openstack-installer.adoc +++ /dev/null @@ -1,51 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-installer"] -= Installing a cluster on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-installer - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on -{rh-openstack-first}. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* On {rh-openstack}, you have access to an external network that does not overlap these CIDR ranges: -** `10.0.0.0/16` -** `172.30.0.0/16` -** `10.128.0.0/14` -+ -If the external network overlaps these ranges, go to xref:./installing-openstack-installer-custom.adoc#installing-openstack-installer-custom[Installing a cluster on OpenStack with customizations] - -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-launching-installer.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-cluster-status.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../web_console/web-console.adoc#web-console[Accessing the web console] for more details about accessing and understanding the {product-title} web console. - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting] diff --git a/installing/installing_openstack/installing-openstack-load-balancing.adoc b/installing/installing_openstack/installing-openstack-load-balancing.adoc deleted file mode 100644 index b161ca7b1650..000000000000 --- a/installing/installing_openstack/installing-openstack-load-balancing.adoc +++ /dev/null @@ -1,9 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-load-balancing"] -= Load balancing deployments on OpenStack -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-load-balancing - -toc::[] - -include::modules/installation-osp-balancing-external-loads.adoc[leveloffset=+1] diff --git a/installing/installing_openstack/installing-openstack-troubleshooting.adoc b/installing/installing_openstack/installing-openstack-troubleshooting.adoc deleted file mode 100644 index 03fc4fe1ea44..000000000000 --- a/installing/installing_openstack/installing-openstack-troubleshooting.adoc +++ /dev/null @@ -1,53 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-troubleshooting"] -= Troubleshooting -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-troubleshooting - -toc::[] - -//Very much a WIP. Chop up sections into mod docs as they're finalized. - -In the event of a failure in {product-title} on OpenStack installation, you can recover by understanding the likely failure modes and then starting to troubleshoot the problem. 
== View OpenStack instance logs - -.Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. In a terminal window, run `openstack console log show <instance_name>`. - -The console logs appear. - -== SSH access to an instance - -.Prerequisites - -* OpenStack CLI tools are installed - -.Procedure - -. Get the IP address of the node on the private network: -+ -[source,terminal] ----- -$ openstack server list | grep master ----- -+ -.Example output -[source,terminal] ----- -| 0dcd756b-ad80-42f1-987a-1451b1ae95ba | cluster-wbzrr-master-1 | ACTIVE | cluster-wbzrr-openshift=172.24.0.21 | rhcos | m1.s2.xlarge | -| 3b455e43-729b-4e64-b3bd-1d4da9996f27 | cluster-wbzrr-master-2 | ACTIVE | cluster-wbzrr-openshift=172.24.0.18 | rhcos | m1.s2.xlarge | -| 775898c3-ecc2-41a4-b98b-a4cd5ae56fd0 | cluster-wbzrr-master-0 | ACTIVE | cluster-wbzrr-openshift=172.24.0.12 | rhcos | m1.s2.xlarge | ----- - -. Connect to the instance from the master that holds the API VIP (and API FIP) as a jumpbox: -+ -[source,terminal] ----- -$ ssh -J core@${FIP} core@<node_private_ip> ----- diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc deleted file mode 100644 index 731e5ac08ea4..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov-kuryr.adoc +++ /dev/null @@ -1,88 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov-kuryr"] -= Installing a cluster on OpenStack with Kuryr on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov-kuryr - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses SR-IOV networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* Your network configuration does not rely on a provider network. Provider networks are not supported. -* You have a {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. 
For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/installation-osp-about-kuryr.adoc[leveloffset=+1] -include::modules/installation-osp-default-kuryr-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-kuryr-increase-quota.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-neutron-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-octavia-configuration.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-known-limitations.adoc[leveloffset=+2] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -// include::modules/installation-osp-enabling-swift.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_openstack/installation-config-parameters-openstack.adoc#installation-config-parameters-openstack[Installation configuration parameters for OpenStack] - -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-kuryr-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-osp-modifying-networktype.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] - -To finish configuring SR-IOV for your cluster, complete the SR-IOV-related "Next steps" that follow the installation process. 
- -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../post_installation_configuration/network-configuration.adoc#networking-osp-preparing-for-sr-iov_post-install-network-configuration[Prepare the cluster for SR-IOV]. -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Install the performance operator with huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. -* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting] -* xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[Configure ingress cluster traffic by using a node port] -* xref:../../installing/installing_openstack/installing-openstack-network-config.adoc#installation-osp-configuring-api-floating-ip_installing-openstack-network-config[Configure {rh-openstack} access with floating IP addresses] diff --git a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc b/installing/installing_openstack/installing-openstack-user-sr-iov.adoc deleted file mode 100644 index b934b4ff9cf8..000000000000 --- a/installing/installing_openstack/installing-openstack-user-sr-iov.adoc +++ /dev/null @@ -1,103 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-openstack-user-sr-iov"] -= Installing a cluster on OpenStack on your own SR-IOV infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-openstack-user-sr-iov - -toc::[] - -In {product-title} {product-version}, you can install a cluster on -{rh-openstack-first} that runs on user-provisioned infrastructure and uses single-root input/output virtualization (SR-IOV) networks to run compute machines. - -Using your own infrastructure allows you to integrate your cluster with existing infrastructure and modifications. The process requires more labor on your part than installer-provisioned installations, because you must create all {rh-openstack} resources, such as Nova servers, Neutron ports, and security groups. However, Red Hat provides Ansible playbooks to help you in the deployment process. - -== Prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. 
-* You verified that {product-title} {product-version} is compatible with your {rh-openstack} version by using the xref:../../architecture/architecture-installation.adoc#supported-platforms-for-openshift-clusters_architecture-installation[Supported platforms for OpenShift clusters] section. You can also compare platform support across different versions by viewing the link:https://access.redhat.com/articles/4679401[{product-title} on {rh-openstack} support matrix]. -* You have an {rh-openstack} account where you want to install {product-title}. -* You understand performance and scalability practices for cluster scaling, control plane sizing, and etcd. For more information, see xref:../../scalability_and_performance/recommended-performance-scale-practices/recommended-control-plane-practices.adoc#recommended-host-practices[Recommended practices for scaling the cluster]. -* On the machine where you run the installation program, you have: -** A single directory in which you can keep the files you create during the installation process -** Python 3 - -include::modules/cluster-entitlements.adoc[leveloffset=+1] -include::modules/installation-osp-default-deployment.adoc[leveloffset=+1] -include::modules/installation-osp-control-compute-machines.adoc[leveloffset=+2] -include::modules/installation-osp-bootstrap-machine.adoc[leveloffset=+2] -include::modules/installation-osp-downloading-modules.adoc[leveloffset=+1] -include::modules/installation-osp-downloading-playbooks.adoc[leveloffset=+1] -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] -include::modules/ssh-agent-using.adoc[leveloffset=+1] -include::modules/installation-osp-creating-image.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-external-network.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api.adoc[leveloffset=+1] -include::modules/installation-osp-accessing-api-floating.adoc[leveloffset=+2] -include::modules/installation-osp-accessing-api-no-floating.adoc[leveloffset=+2] -include::modules/installation-osp-describing-cloud-parameters.adoc[leveloffset=+1] -include::modules/installation-initializing.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources -* xref:../../installing/installing_openstack/installation-config-parameters-openstack.adoc#installation-config-parameters-openstack[Installation configuration parameters for OpenStack] - -include::modules/installation-osp-config-yaml.adoc[leveloffset=+2] -include::modules/installation-osp-custom-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-fixing-subnet.adoc[leveloffset=+2] -include::modules/installation-osp-emptying-worker-pools.adoc[leveloffset=+2] -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-converting-ignition-resources.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane-ignition.adoc[leveloffset=+1] -include::modules/installation-osp-creating-network-resources.adoc[leveloffset=+1] -Optionally, you can use the `inventory.yaml` file that you created to customize your installation. For example, you can deploy a cluster that uses bare metal machines. 
- -include::modules/installation-osp-deploying-bare-metal-machines.adoc[leveloffset=+2] -include::modules/installation-osp-creating-bootstrap-machine.adoc[leveloffset=+1] -include::modules/installation-osp-creating-control-plane.adoc[leveloffset=+1] -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] -include::modules/installation-osp-deleting-bootstrap-resources.adoc[leveloffset=+1] -include::modules/installation-osp-configuring-sr-iov.adoc[leveloffset=+1] -include::modules/installation-osp-creating-sr-iov-compute-machines.adoc[leveloffset=+1] -include::modules/installation-approve-csrs.adoc[leveloffset=+1] -include::modules/installation-osp-verifying-installation.adoc[leveloffset=+1] -The cluster is operational. Before you can configure it for SR-IOV networks though, you must perform additional tasks. - -include::modules/networking-osp-preparing-for-sr-iov.adoc[leveloffset=+1] -include::modules/networking-osp-enabling-metadata.adoc[leveloffset=+2] -include::modules/networking-osp-enabling-vfio-noiommu.adoc[leveloffset=+2] - -[NOTE] -==== -After you apply the machine config to the machine pool, you can xref:../../post_installation_configuration/machine-configuration-tasks.adoc#checking-mco-status_post-install-machine-configuration-tasks[watch the machine config pool status] to see when the machines are available. -==== - -// TODO: If bullet one of Next steps is truly required for this flow, these topics (in full or in part) could be added here rather than linked to. -// This document is quite long, however, and operator installation and configuration should arguably remain in their their own assemblies. - -The cluster is installed and prepared for SR-IOV configuration. You must now perform the SR-IOV configuration tasks in "Next steps". - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="additional-resources_cluster-telemetry"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -[role="_additional-resources"] -[id="additional-resources_installing-openstack-user-sr-iov"] -== Additional resources -* xref:../../scalability_and_performance/cnf-low-latency-tuning.adoc#cnf-understanding-low-latency_cnf-master[Low latency tuning of OpenShift Container Platform nodes] - -[id="next-steps_installing-user-sr-iov"] -== Next steps - -* To complete SR-IOV configuration for your cluster: -** xref:../../scalability_and_performance/what-huge-pages-do-and-how-they-are-consumed-by-apps.adoc#what-huge-pages-do_huge-pages[Configure huge pages support]. -** xref:../../networking/hardware_networks/installing-sriov-operator.adoc#installing-sr-iov-operator_installing-sriov-operator[Install the SR-IOV Operator]. -** xref:../../networking/hardware_networks/configuring-sriov-device.adoc#nw-sriov-networknodepolicy-object_configuring-sriov-device[Configure your SR-IOV network device]. -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]. 
-* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting] -* xref:../../networking/configuring_ingress_cluster_traffic/configuring-ingress-cluster-traffic-nodeport.adoc#nw-using-nodeport_configuring-ingress-cluster-traffic-nodeport[Configure ingress cluster traffic by using a node port] -* xref:../../installing/installing_openstack/installing-openstack-network-config.adoc#installation-osp-configuring-api-floating-ip_installing-openstack-network-config[Configure {rh-openstack} access with floating IP addresses] diff --git a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc b/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc deleted file mode 100644 index 47ce47fbae9d..000000000000 --- a/installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc +++ /dev/null @@ -1,156 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="installing-restricted-networks-vmc-user-infra"] -= Installing a cluster on VMC in a restricted network with user-provisioned infrastructure -include::_attributes/common-attributes.adoc[] -:context: installing-restricted-networks-vmc-user-infra - -toc::[] - -In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision in a restricted network by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS]. - -Once you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, co-located in the VMC environment. The installation program and control plane automates the process of deploying and managing the resources needed for the {product-title} cluster. - -include::snippets/vcenter-support.adoc[] - -include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1] -include::modules/vmc-sizer-tool.adoc[leveloffset=+2] - -== vSphere prerequisites - -* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes. -* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users]. -* You xref:../../installing/disconnected_install/installing-mirroring-installation-images.adoc#installing-mirroring-installation-images[created a registry on your mirror host] and obtain the `imageContentSources` data for your version of {product-title}. -+ -[IMPORTANT] -==== -Because the installation media is on the mirror host, you can use that computer -to complete all installation steps. -==== -* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall and plan to use the Telemetry service, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured the firewall to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. 
-==== - -include::modules/installation-about-restricted-network.adoc[leveloffset=+1] - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-restricted-networks-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-restricted-networks-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-restricted-networks-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -//You extract the installation program from the mirrored content. - -//You can install the CLI on the mirror host. 
-
-include::modules/installation-initializing-manual.adoc[leveloffset=+1]
-
-include::modules/installation-configuration-parameters.adoc[leveloffset=+2]
-
-include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2]
-
-include::modules/installation-configure-proxy.adoc[leveloffset=+2]
-
-include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2]
-
-//include::modules/installation-three-node-cluster.adoc[leveloffset=+2]
-
-include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1]
-
-include::modules/installation-extracting-infraid.adoc[leveloffset=+1]
-
-include::modules/installation-vsphere-machines.adoc[leveloffset=+1]
-
-include::modules/machine-vsphere-machines.adoc[leveloffset=+1]
-
-include::modules/installation-disk-partitioning.adoc[leveloffset=+1]
-
-include::modules/installation-installing-bare-metal.adoc[leveloffset=+1]
-
-include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1]
-
-include::modules/installation-approve-csrs.adoc[leveloffset=+1]
-
-include::modules/installation-operators-config.adoc[leveloffset=+1]
-
-include::modules/olm-restricted-networks-configuring-operatorhub.adoc[leveloffset=+2]
-
-include::modules/installation-registry-storage-config.adoc[leveloffset=+2]
-
-include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3]
-
-include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3]
-
-include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3]
-
-For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring registry storage for VMware vSphere].
-
-include::modules/installation-complete-user-infra.adoc[leveloffset=+1]
-
-You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere].
-
-include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1]
-
-include::modules/cluster-telemetry.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service
-
-== Next steps
-
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]
-* xref:../../post_installation_configuration/cluster-tasks.adoc#post-install-must-gather-disconnected[Configure image streams]
-* xref:../../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Use Operator Lifecycle Manager (OLM) on restricted networks]
-* xref:../../openshift_images/image-configuration.adoc#images-configuration-cas_image-configuration[Configure additional trust stores]
-* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting]
-* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues.
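-As a quick reference for the OperatorHub module included earlier in this assembly, disabling the default remote catalog sources in a restricted network typically comes down to a single patch. This is a sketch, not a substitute for the full module:
-
-[source,terminal]
-----
-$ oc patch OperatorHub cluster --type json \
-    -p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
----- 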
diff --git a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc b/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc
deleted file mode 100644
index 1be749229eef..000000000000
--- a/installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc
+++ /dev/null
@@ -1,139 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-vmc-network-customizations-user-infra"]
-= Installing a cluster on VMC with user-provisioned infrastructure and network customizations
-include::_attributes/common-attributes.adoc[]
-:context: installing-vmc-network-customizations-user-infra
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision, with customized network configuration options, by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS].
-
-After you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, which is co-located in the VMC environment. The installation program and control plane automate the process of deploying and managing the resources needed for the {product-title} cluster.
-
-By customizing your network configuration, your cluster can coexist with existing IP address allocations in your environment and integrate with existing VXLAN configurations. You must set most of the network configuration parameters during installation, and you can modify only `kubeProxy` configuration parameters in a running cluster.
-
-include::snippets/vcenter-support.adoc[]
-
-include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1]
-include::modules/vmc-sizer-tool.adoc[leveloffset=+2]
-
-== vSphere prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage]. For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage].
-* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to.
-
-include::modules/cluster-entitlements.adoc[leveloffset=+1]
-
-include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1]
-
-include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver].
-* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere].
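-Recall from the introduction that most network parameters must be set at installation time. As an illustration only, the network customizations in this assembly ultimately land in the `networking` stanza of the `install-config.yaml` file; the CIDR values below are the documented defaults, shown here as a sketch:
-
-[source,yaml]
-----
-networking:
-  networkType: OVNKubernetes
-  clusterNetwork:
-  - cidr: 10.128.0.0/14
-    hostPrefix: 23
-  serviceNetwork:
-  - 172.30.0.0/16
----- 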
- -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-network-customizations-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-network-customizations-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-network-customizations-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - -// Network Operator specific configuration - -include::modules/nw-modifying-operator-install-config.adoc[leveloffset=+1] -include::modules/nw-operator-cr.adoc[leveloffset=+1] - -include::modules/installation-generate-ignition-configs.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see 
xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere].
-
-include::modules/installation-complete-user-infra.adoc[leveloffset=+1]
-
-You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere].
-
-include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1]
-
-include::modules/cluster-telemetry.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service
-
-== Next steps
-
-* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster]
-* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting]
-* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage]
-* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues.
diff --git a/installing/installing_vmc/installing-vmc-user-infra.adoc b/installing/installing_vmc/installing-vmc-user-infra.adoc
deleted file mode 100644
index de446d0b6c07..000000000000
--- a/installing/installing_vmc/installing-vmc-user-infra.adoc
+++ /dev/null
@@ -1,146 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-vmc-user-infra"]
-= Installing a cluster on VMC with user-provisioned infrastructure
-include::_attributes/common-attributes.adoc[]
-:context: installing-vmc-user-infra
-:platform: VMC
-
-toc::[]
-
-In {product-title} version {product-version}, you can install a cluster on VMware vSphere infrastructure that you provision by deploying it to link:https://cloud.vmware.com/vmc-aws[VMware Cloud (VMC) on AWS].
-
-After you configure your VMC environment for {product-title} deployment, you use the {product-title} installation program from the bastion management host, which is co-located in the VMC environment. The installation program and control plane automate the process of deploying and managing the resources needed for the {product-title} cluster.
-
-include::snippets/vcenter-support.adoc[]
-
-include::modules/setting-up-vmc-for-vsphere.adoc[leveloffset=+1]
-include::modules/vmc-sizer-tool.adoc[leveloffset=+2]
-
-== vSphere prerequisites
-
-* You reviewed details about the xref:../../architecture/architecture-installation.adoc#architecture-installation[{product-title} installation and update] processes.
-* You read the documentation on xref:../../installing/overview/installing-preparing.adoc#installing-preparing[selecting a cluster installation method and preparing it for users].
-* You provisioned xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#installation-registry-storage-block-recreate-rollout_configuring-registry-storage-vsphere[block registry storage].
For more information on persistent storage, see xref:../../storage/understanding-persistent-storage.adoc#understanding-persistent-storage[Understanding persistent storage]. -* If you use a firewall, you xref:../../installing/install_config/configuring-firewall.adoc#configuring-firewall[configured it to allow the sites] that your cluster requires access to. -+ -[NOTE] -==== -Be sure to also review this site list if you are configuring a proxy. -==== - -include::modules/cluster-entitlements.adoc[leveloffset=+1] - -include::modules/installation-vsphere-infrastructure.adoc[leveloffset=+1] - -include::modules/vmware-csi-driver-reqs.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* To remove a third-party CSI driver, see xref:../../storage/container_storage_interface/persistent-storage-csi-vsphere.adoc#persistent-storage-csi-vsphere-install-issues_persistent-storage-csi-vsphere[Removing a third-party vSphere CSI Driver]. -* To update the hardware version for your vSphere nodes, see xref:../../updating/updating_a_cluster/updating-hardware-on-nodes-running-on-vsphere.adoc#updating-hardware-on-nodes-running-on-vsphere[Updating hardware on nodes running in vSphere]. - -[id="installation-requirements-user-infra_{context}"] -== Requirements for a cluster with user-provisioned infrastructure - -For a cluster that contains user-provisioned infrastructure, you must deploy all -of the required machines. - -This section describes the requirements for deploying {product-title} on user-provisioned infrastructure. - -include::modules/installation-vsphere-installer-infra-requirements.adoc[leveloffset=+2] - -[role="_additional-resources"] -.Additional resources -* xref:../../machine_management/creating_machinesets/creating-machineset-vsphere.adoc#creating-machineset-vsphere_creating-machineset-vsphere[Creating a compute machine set on vSphere] - -include::modules/installation-machine-requirements.adoc[leveloffset=+2] -include::modules/installation-minimum-resource-requirements.adoc[leveloffset=+2] -include::modules/csr-management.adoc[leveloffset=+2] - -include::modules/installation-network-user-infra.adoc[leveloffset=+2] - -include::modules/installation-dns-user-infra.adoc[leveloffset=+2] - -include::modules/installation-load-balancing-user-infra.adoc[leveloffset=+2] - -include::modules/installation-infrastructure-user-infra.adoc[leveloffset=+1] - -include::modules/installation-user-provisioned-validating-dns.adoc[leveloffset=+1] - -include::modules/ssh-agent-using.adoc[leveloffset=+1] - -include::modules/installation-vsphere-regions-zones.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#installation-configuration-parameters-additional-vsphere_installing-vmc-user-infra[Additional VMware vSphere configuration parameters] - -* xref:../../installing/installing_vmc/installing-vmc-user-infra.adoc#deprecated-parameters-vsphere_installing-vmc-user-infra[Deprecated VMware vSphere configuration parameters] - -include::modules/installation-obtaining-installer.adoc[leveloffset=+1] - -include::modules/installation-initializing-manual.adoc[leveloffset=+1] - -include::modules/installation-configuration-parameters.adoc[leveloffset=+2] - -include::modules/installation-vsphere-config-yaml.adoc[leveloffset=+2] - -include::modules/installation-configure-proxy.adoc[leveloffset=+2] - -include::modules/configuring-vsphere-regions-zones.adoc[leveloffset=+2] - 
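-To give a sense of what the regions and zones modules above configure, the following is a minimal sketch of a `platform.vsphere.failureDomains` entry in the `install-config.yaml` file; every name, path, and hostname below is hypothetical:
-
-[source,yaml]
-----
-platform:
-  vsphere:
-    failureDomains: # one entry per zone; names are hypothetical
-    - name: us-east-1
-      region: us-east
-      zone: us-east-1a
-      server: vcenter.example.com
-      topology:
-        datacenter: example-datacenter
-        computeCluster: "/example-datacenter/host/zone1-cluster"
-        networks:
-        - example-network
-        datastore: "/example-datacenter/datastore/example-datastore"
---- 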
-//include::modules/installation-three-node-cluster.adoc[leveloffset=+2] - -include::modules/installation-user-infra-generate-k8s-manifest-ignition.adoc[leveloffset=+1] - -include::modules/installation-extracting-infraid.adoc[leveloffset=+1] - -include::modules/installation-vsphere-machines.adoc[leveloffset=+1] - -include::modules/machine-vsphere-machines.adoc[leveloffset=+1] - -include::modules/installation-disk-partitioning.adoc[leveloffset=+1] - -include::modules/cli-installing-cli.adoc[leveloffset=+1] - -include::modules/installation-installing-bare-metal.adoc[leveloffset=+1] - -include::modules/cli-logging-in-kubeadmin.adoc[leveloffset=+1] - -include::modules/installation-approve-csrs.adoc[leveloffset=+1] - -include::modules/installation-operators-config.adoc[leveloffset=+1] - -include::modules/registry-removed.adoc[leveloffset=+2] - -include::modules/installation-registry-storage-config.adoc[leveloffset=+2] - -include::modules/registry-configuring-storage-vsphere.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-non-production.adoc[leveloffset=+3] - -include::modules/installation-registry-storage-block-recreate-rollout.adoc[leveloffset=+3] - -For instructions about configuring registry storage so that it references the correct PVC, see xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#registry-configuring-storage-vsphere_configuring-registry-storage-vsphere[Configuring the registry for vSphere]. - -include::modules/installation-complete-user-infra.adoc[leveloffset=+1] - -You can add extra compute machines after the cluster installation is completed by following xref:../../machine_management/user_infra/adding-vsphere-compute-user-infra.adoc#adding-vsphere-compute-user-infra[Adding compute machines to vSphere]. - -include::modules/persistent-storage-vsphere-backup.adoc[leveloffset=+1] - -include::modules/cluster-telemetry.adoc[leveloffset=+1] - -[role="_additional-resources"] -.Additional resources - -* See xref:../../support/remote_health_monitoring/about-remote-health-monitoring.adoc#about-remote-health-monitoring[About remote health monitoring] for more information about the Telemetry service - -== Next steps - -* xref:../../post_installation_configuration/cluster-tasks.adoc#available_cluster_customizations[Customize your cluster] -* xref:../../support/remote_health_monitoring/remote-health-reporting.adoc#remote-health-reporting[Remote health reporting] -* xref:../../registry/configuring_registry_storage/configuring-registry-storage-vsphere.adoc#configuring-registry-storage-vsphere[Set up your registry and configure registry storage] -* Optional: xref:../../installing/installing_vsphere/using-vsphere-problem-detector-operator.adoc#vsphere-problem-detector-viewing-events_vsphere-problem-detector[View the events from the vSphere Problem Detector Operator] to determine if the cluster has permission or storage configuration issues. 
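-As a quick reference for the CSR approval module included earlier in this assembly, checking and approving pending node certificate signing requests generally looks like the following sketch, where `<csr_name>` is a placeholder:
-
-[source,terminal]
-----
-$ oc get csr
-$ oc adm certificate approve <csr_name>
---- 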
diff --git a/machine_management/cluster_api_machine_management/cluster-api-disabling.adoc b/machine_management/cluster_api_machine_management/cluster-api-disabling.adoc
deleted file mode 100644
index 9161919b9e0c..000000000000
--- a/machine_management/cluster_api_machine_management/cluster-api-disabling.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="cluster-api-disabling"]
-= Disabling Cluster API machine sets
-include::_attributes/common-attributes.adoc[]
-:context: cluster-api-disabling
-
-toc::[]
-
-:FeatureName: Managing machines with the Cluster API
-include::snippets/technology-preview.adoc[]
-
-//Placeholder assembly, commented out in the topic map.
-//how to disable, clean up, verify, and reenable
\ No newline at end of file
diff --git a/machine_management/cluster_api_machine_management/cluster-api-resiliency.adoc b/machine_management/cluster_api_machine_management/cluster-api-resiliency.adoc
deleted file mode 100644
index 5bb4166d16a7..000000000000
--- a/machine_management/cluster_api_machine_management/cluster-api-resiliency.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="cluster-api-resiliency"]
-= Cluster API resiliency and recovery
-include::_attributes/common-attributes.adoc[]
-:context: cluster-api-resiliency
-
-toc::[]
-
-:FeatureName: Managing machines with the Cluster API
-include::snippets/technology-preview.adoc[]
-
-//Placeholder assembly, commented out in the topic map.
-// MAPI would be HA/failure domains, recovery with machine health checks. Not sure about CAPI version
\ No newline at end of file
diff --git a/metering/configuring_metering/metering-about-configuring.adoc b/metering/configuring_metering/metering-about-configuring.adoc
deleted file mode 100644
index fb0dddd51d36..000000000000
--- a/metering/configuring_metering/metering-about-configuring.adoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="metering-about-configuring"]
-= About configuring metering
-include::_attributes/common-attributes.adoc[]
-:context: metering-about-configuring
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-The `MeteringConfig` custom resource specifies all the configuration details for your metering installation. When you first install the metering stack, a default `MeteringConfig` custom resource is generated. Use the examples in the documentation to modify this default file. Keep in mind the following key points:
-
-* At a minimum, you need to xref:../../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore].
-
-* Most default configuration settings work, but larger deployments or highly customized deployments should review all configuration options carefully.
-
-* Some configuration options cannot be modified after installation.
-
-For configuration options that can be modified after installation, make the changes in your `MeteringConfig` custom resource and reapply the file.
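-For orientation, the following is a minimal sketch of a modified `MeteringConfig` custom resource that configures S3 persistent storage; the bucket, region, and secret names are hypothetical, and the full set of fields is covered in the storage documentation linked above:
-
-[source,yaml]
-----
-apiVersion: metering.openshift.io/v1
-kind: MeteringConfig
-metadata:
-  name: "operator-metering"
-spec:
-  storage:
-    type: "hive"
-    hive:
-      type: "s3"
-      s3:
-        bucket: "example-bucket/metering-data" # hypothetical bucket and path
-        region: "us-east-1" # hypothetical region
-        secretName: "example-aws-secret" # hypothetical secret with AWS credentials
-----
-
-After you edit the file, you can reapply it with a command such as `oc apply -f metering-config.yaml -n openshift-metering`.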
diff --git a/metering/configuring_metering/metering-common-config-options.adoc b/metering/configuring_metering/metering-common-config-options.adoc deleted file mode 100644 index 3e1b3746bed5..000000000000 --- a/metering/configuring_metering/metering-common-config-options.adoc +++ /dev/null @@ -1,175 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-common-config-options"] -= Common configuration options -include::_attributes/common-attributes.adoc[] -:context: metering-common-config-options - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -== Resource requests and limits -You can adjust the CPU, memory, or storage resource requests and/or limits for pods and volumes. The `default-resource-limits.yaml` below provides an example of setting resource request and limits for each component. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - resources: - limits: - cpu: 1 - memory: 500Mi - requests: - cpu: 500m - memory: 100Mi - presto: - spec: - coordinator: - resources: - limits: - cpu: 4 - memory: 4Gi - requests: - cpu: 2 - memory: 2Gi - - worker: - replicas: 0 - resources: - limits: - cpu: 8 - memory: 8Gi - requests: - cpu: 4 - memory: 2Gi - - hive: - spec: - metastore: - resources: - limits: - cpu: 4 - memory: 2Gi - requests: - cpu: 500m - memory: 650Mi - storage: - class: null - create: true - size: 5Gi - server: - resources: - limits: - cpu: 1 - memory: 1Gi - requests: - cpu: 500m - memory: 500Mi ----- - -== Node selectors -You can run the metering components on specific sets of nodes. Set the `nodeSelector` on a metering component to control where the component is scheduled. The `node-selectors.yaml` file below provides an example of setting node selectors for each component. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. Specify `""` as the annotation value. -==== - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - reporting-operator: - spec: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - - presto: - spec: - coordinator: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - worker: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - hive: - spec: - metastore: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> - server: - nodeSelector: - "node-role.kubernetes.io/infra": "" <1> ----- -<1> Add a `nodeSelector` parameter with the appropriate value to the component you want to move. You can use a `nodeSelector` in the format shown or use key-value pairs, based on the value specified for the node. - -[NOTE] -==== -Add the `openshift.io/node-selector: ""` namespace annotation to the metering namespace YAML file before configuring specific node selectors for the operand pods. When the `openshift.io/node-selector` annotation is set on the project, the value is used in preference to the value of the `spec.defaultNodeSelector` field in the cluster-wide `Scheduler` object. -==== - -.Verification - -You can verify the metering node selectors by performing any of the following checks: - -* Verify that all pods for metering are correctly scheduled on the IP of the node that is configured in the `MeteringConfig` custom resource: -+ --- -. 
Check all pods in the `openshift-metering` namespace: -+ -[source,terminal] ----- -$ oc --namespace openshift-metering get pods -o wide ----- -+ -The output shows the `NODE` and corresponding `IP` for each pod running in the `openshift-metering` namespace. -+ -.Example output -[source,terminal] ----- -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -hive-metastore-0 1/2 Running 0 4m33s 10.129.2.26 ip-10-0-210-167.us-east-2.compute.internal -hive-server-0 2/3 Running 0 4m21s 10.128.2.26 ip-10-0-150-175.us-east-2.compute.internal -metering-operator-964b4fb55-4p699 2/2 Running 0 7h30m 10.131.0.33 ip-10-0-189-6.us-east-2.compute.internal -nfs-server 1/1 Running 0 7h30m 10.129.2.24 ip-10-0-210-167.us-east-2.compute.internal -presto-coordinator-0 2/2 Running 0 4m8s 10.131.0.35 ip-10-0-189-6.us-east-2.compute.internal -reporting-operator-869b854c78-8g2x5 1/2 Running 0 7h27m 10.128.2.25 ip-10-0-150-175.us-east-2.compute.internal ----- -+ -. Compare the nodes in the `openshift-metering` namespace to each node `NAME` in your cluster: -+ -[source,terminal] ----- -$ oc get nodes ----- -+ -.Example output -[source,terminal] ----- -NAME STATUS ROLES AGE VERSION -ip-10-0-147-106.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-150-175.us-east-2.compute.internal Ready worker 14h v1.27.3 -ip-10-0-175-23.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-189-6.us-east-2.compute.internal Ready worker 14h v1.27.3 -ip-10-0-205-158.us-east-2.compute.internal Ready master 14h v1.27.3 -ip-10-0-210-167.us-east-2.compute.internal Ready worker 14h v1.27.3 ----- --- - -* Verify that the node selector configuration in the `MeteringConfig` custom resource does not interfere with the cluster-wide node selector configuration such that no metering operand pods are scheduled. - -** Check the cluster-wide `Scheduler` object for the `spec.defaultNodeSelector` field, which shows where pods are scheduled by default: -+ -[source,terminal] ----- -$ oc get schedulers.config.openshift.io cluster -o yaml ----- diff --git a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc b/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc deleted file mode 100644 index 703e8be13d54..000000000000 --- a/metering/configuring_metering/metering-configure-aws-billing-correlation.adoc +++ /dev/null @@ -1,116 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-configure-aws-billing-correlation"] -= Configure AWS billing correlation -include::_attributes/common-attributes.adoc[] -:context: metering-configure-aws-billing-correlation - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Metering can correlate cluster usage information with https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-costusage.html[AWS detailed billing information], attaching a dollar amount to resource usage. For clusters running in EC2, you can enable this by modifying the example `aws-billing.yaml` file below. - -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: MeteringConfig -metadata: - name: "operator-metering" -spec: - openshift-reporting: - spec: - awsBillingReportDataSource: - enabled: true - # Replace these with where your AWS billing reports are - # stored in S3. 
-        bucket: "" <1>
-        prefix: ""
-        region: ""
-
-  reporting-operator:
-    spec:
-      config:
-        aws:
-          secretName: "" <2>
-
-  presto:
-    spec:
-      config:
-        aws:
-          secretName: "" <2>
-
-  hive:
-    spec:
-      config:
-        aws:
-          secretName: "" <2>
----
-<1> Update the bucket, prefix, and region to the location of your AWS detailed billing report.
-<2> All `secretName` fields should be set to the name of a secret in the metering namespace containing AWS credentials in the `data.aws-access-key-id` and `data.aws-secret-access-key` fields. See the example secret file below for more details.
-
-To enable AWS billing correlation, first ensure the AWS Cost and Usage Reports are enabled. For more information, see https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-reports-gettingstarted-turnonreports.html[Turning on the AWS Cost and Usage Report] in the AWS documentation.
-
-[source,yaml]
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name:
-data:
-  aws-access-key-id: "dGVzdAo="
-  aws-secret-access-key: "c2VjcmV0Cg=="
----
-
-To store data in S3, the `aws-access-key-id` and `aws-secret-access-key` credentials must have read and write access to the bucket. For an example of an IAM policy granting the required permissions, see the `aws/read-write.json` file below.
-
-[source,json]
----
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Sid": "1",
-      "Effect": "Allow",
-      "Action": [
-        "s3:AbortMultipartUpload",
-        "s3:DeleteObject",
-        "s3:GetObject",
-        "s3:HeadBucket",
-        "s3:ListBucket",
-        "s3:ListMultipartUploadParts",
-        "s3:PutObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::operator-metering-data/*", <1>
-        "arn:aws:s3:::operator-metering-data" <1>
-      ]
-    }
-  ]
-}
----
-<1> Replace `operator-metering-data` with the name of your bucket.
-
-You can enable AWS billing correlation either before or after installation. Disabling it after installation can cause errors in the Reporting Operator.
diff --git a/metering/configuring_metering/metering-configure-hive-metastore.adoc b/metering/configuring_metering/metering-configure-hive-metastore.adoc
deleted file mode 100644
index f36dd78a5cb1..000000000000
--- a/metering/configuring_metering/metering-configure-hive-metastore.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="metering-configure-hive-metastore"]
-= Configuring the Hive metastore
-include::_attributes/common-attributes.adoc[]
-:context: metering-configure-hive-metastore
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-Hive metastore is responsible for storing all the metadata about the database tables created in Presto and Hive. By default, the metastore stores this information in a local embedded Derby database in a persistent volume attached to the pod.
-
-Generally, the default configuration of the Hive metastore works for small clusters, but you might want to improve performance or move storage requirements out of the cluster by using a dedicated SQL database to store the Hive metastore data.
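-For illustration, pointing the metastore at an external database is done in the `hive` section of the `MeteringConfig` custom resource. The following sketch assumes a MySQL endpoint; the URL, driver, and credentials are placeholders, and the exact field names should be checked against the modules that follow:
-
-[source,yaml]
----
-spec:
-  hive:
-    spec:
-      config:
-        db:
-          url: "jdbc:mysql://mysql.example.com:3306/hive_metastore" # hypothetical endpoint
-          driver: "com.mysql.jdbc.Driver"
-          username: "hive" # placeholder credentials
-          password: "changeme"
--- 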
- -include::modules/metering-configure-persistentvolumes.adoc[leveloffset=+1] - -include::modules/metering-use-mysql-or-postgresql-for-hive.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/metering-configure-persistent-storage.adoc b/metering/configuring_metering/metering-configure-persistent-storage.adoc deleted file mode 100644 index e67ab29b3265..000000000000 --- a/metering/configuring_metering/metering-configure-persistent-storage.adoc +++ /dev/null @@ -1,22 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-configure-persistent-storage"] -= Configuring persistent storage -include::_attributes/common-attributes.adoc[] -:context: metering-configure-persistent-storage - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Metering requires persistent storage to persist data collected by the Metering Operator and to store the results of reports. A number of different storage providers and storage formats are supported. Select your storage provider and modify the example configuration files to configure persistent storage for your metering installation. - -include::modules/metering-store-data-in-s3.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-s3-compatible.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-azure.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-gcp.adoc[leveloffset=+1] - -include::modules/metering-store-data-in-shared-volumes.adoc[leveloffset=+1] diff --git a/metering/configuring_metering/metering-configure-reporting-operator.adoc b/metering/configuring_metering/metering-configure-reporting-operator.adoc deleted file mode 100644 index db379aad2754..000000000000 --- a/metering/configuring_metering/metering-configure-reporting-operator.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-configure-reporting-operator"] -= Configuring the Reporting Operator -include::_attributes/common-attributes.adoc[] -:context: metering-configure-reporting-operator - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -The Reporting Operator is responsible for collecting data from Prometheus, storing the metrics in Presto, running report queries against Presto, and exposing their results via an HTTP API. Configuring the Reporting Operator is primarily done in your `MeteringConfig` custom resource. 
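-As a taste of what this configuration looks like, the following sketch overrides the Prometheus connection that the Reporting Operator uses; treat the field names as assumptions to verify against the module that follows, and the URL as a placeholder:
-
-[source,yaml]
----
-spec:
-  reporting-operator:
-    spec:
-      config:
-        prometheus:
-          url: "https://prometheus-k8s.openshift-monitoring.svc:9091" # placeholder URL
--- 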
-
-include::modules/metering-prometheus-connection.adoc[leveloffset=+1]
-
-include::modules/metering-exposing-the-reporting-api.adoc[leveloffset=+1]
diff --git a/metering/metering-about-metering.adoc b/metering/metering-about-metering.adoc
deleted file mode 100644
index 2af4a51b40e3..000000000000
--- a/metering/metering-about-metering.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="about-metering"]
-= About metering
-include::_attributes/common-attributes.adoc[]
-:context: about-metering
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-include::modules/metering-overview.adoc[leveloffset=+1]
diff --git a/metering/metering-installing-metering.adoc b/metering/metering-installing-metering.adoc
deleted file mode 100644
index c31ae259dd3c..000000000000
--- a/metering/metering-installing-metering.adoc
+++ /dev/null
@@ -1,62 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="installing-metering"]
-= Installing metering
-include::_attributes/common-attributes.adoc[]
-:context: installing-metering
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-Review the following sections before installing metering into your cluster.
-
-To get started installing metering, first install the Metering Operator from OperatorHub. Next, configure your instance of metering by creating a `MeteringConfig` custom resource (CR). Installing the Metering Operator creates a default `MeteringConfig` resource that you can modify using the examples in the documentation. After creating your `MeteringConfig` resource, install the metering stack. Finally, verify your installation.
-
-include::modules/metering-install-prerequisites.adoc[leveloffset=+1]
-
-include::modules/metering-install-operator.adoc[leveloffset=+1]
-
-// Including this content directly in the assembly because the workflow requires linking off to the config docs, and we don't currently link
-// inside of modules - klamenzo 2019-09-23
-[id="metering-install-metering-stack_{context}"]
-== Installing the metering stack
-
-After adding the Metering Operator to your cluster, you can install the components of metering by installing the metering stack.
-
-== Prerequisites
-
-* Review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuration options]
-* Create a `MeteringConfig` resource. You can begin the following process to generate a default `MeteringConfig` resource, then use the examples in the documentation to modify this default file for your specific installation. Review the following topics to create your `MeteringConfig` resource:
-** For configuration options, review xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering].
-** At a minimum, you need to xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[configure persistent storage] and xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[configure the Hive metastore].
-
-[IMPORTANT]
-====
-There can only be one `MeteringConfig` resource in the `openshift-metering` namespace. Any other configuration is not supported.
-====
-
-.Procedure
-
-. From the web console, ensure you are on the *Operator Details* page for the Metering Operator in the `openshift-metering` project.
You can navigate to this page by clicking *Operators* -> *Installed Operators*, then selecting the Metering Operator. - -. Under *Provided APIs*, click *Create Instance* on the Metering Configuration card. This opens a YAML editor with the default `MeteringConfig` resource file where you can define your configuration. -+ -[NOTE] -==== -For example configuration files and all supported configuration options, review the xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[configuring metering documentation]. -==== - -. Enter your `MeteringConfig` resource into the YAML editor and click *Create*. - -The `MeteringConfig` resource begins to create the necessary resources for your metering stack. You can now move on to verifying your installation. - -include::modules/metering-install-verify.adoc[leveloffset=+1] - -[role="_additional-resources"] -[id="metering-install-additional-resources_{context}"] -== Additional resources - -* For more information on configuration steps and available storage platforms, see xref:../metering/configuring_metering/metering-configure-persistent-storage.adoc#metering-configure-persistent-storage[Configuring persistent storage]. - -* For the steps to configure Hive, see xref:../metering/configuring_metering/metering-configure-hive-metastore.adoc#metering-configure-hive-metastore[Configuring the Hive metastore]. diff --git a/metering/metering-troubleshooting-debugging.adoc b/metering/metering-troubleshooting-debugging.adoc deleted file mode 100644 index bb2c710146ec..000000000000 --- a/metering/metering-troubleshooting-debugging.adoc +++ /dev/null @@ -1,21 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-troubleshooting-debugging"] -= Troubleshooting and debugging metering -include::_attributes/common-attributes.adoc[] -:context: metering-troubleshooting-debugging - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -Use the following sections to help troubleshoot and debug specific issues with metering. - -In addition to the information in this section, be sure to review the following topics: - -* xref:../metering/metering-installing-metering.adoc#metering-install-prerequisites_installing-metering[Prerequisites for installing metering]. -* xref:../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[About configuring metering] - -include::modules/metering-troubleshooting.adoc[leveloffset=+1] - -include::modules/metering-debugging.adoc[leveloffset=+1] diff --git a/metering/metering-uninstall.adoc b/metering/metering-uninstall.adoc deleted file mode 100644 index 8237458f7776..000000000000 --- a/metering/metering-uninstall.adoc +++ /dev/null @@ -1,31 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -:context: metering-uninstall -[id="metering-uninstall"] -= Uninstalling metering -include::_attributes/common-attributes.adoc[] - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -You can remove metering from your {product-title} cluster. - -[NOTE] -==== -Metering does not manage or delete Amazon S3 bucket data. After uninstalling metering, you must manually clean up S3 buckets that were used to store metering data. 
-====
-
-[id="metering-remove"]
-== Removing the Metering Operator from your cluster
-
-Remove the Metering Operator from your cluster by following the documentation on xref:../operators/admin/olm-deleting-operators-from-cluster.adoc#olm-deleting-operators-from-a-cluster[deleting Operators from a cluster].
-
-[NOTE]
-====
-Removing the Metering Operator from your cluster does not remove its custom resource definitions or managed resources. See the following sections on xref:../metering/metering-uninstall.adoc#metering-uninstall_metering-uninstall[Uninstalling a metering namespace] and xref:../metering/metering-uninstall.adoc#metering-uninstall-crds_metering-uninstall[Uninstalling metering custom resource definitions] for steps to remove any remaining metering components.
-====
-
-include::modules/metering-uninstall.adoc[leveloffset=+1]
-
-include::modules/metering-uninstall-crds.adoc[leveloffset=+1]
diff --git a/metering/metering-usage-examples.adoc b/metering/metering-usage-examples.adoc
deleted file mode 100644
index 29df37d8afba..000000000000
--- a/metering/metering-usage-examples.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="metering-usage-examples"]
-= Examples of using metering
-include::_attributes/common-attributes.adoc[]
-:context: metering-usage-examples
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-Use the following example reports to get started measuring capacity, usage, and utilization in your cluster. These examples showcase the various types of reports metering offers, along with a selection of the predefined queries.
-
-== Prerequisites
-* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install metering]
-* Review the details about xref:../metering/metering-using-metering.adoc#using-metering[writing and viewing reports].
-
-include::modules/metering-cluster-capacity-examples.adoc[leveloffset=+1]
-
-include::modules/metering-cluster-usage-examples.adoc[leveloffset=+1]
-
-include::modules/metering-cluster-utilization-examples.adoc[leveloffset=+1]
diff --git a/metering/metering-using-metering.adoc b/metering/metering-using-metering.adoc
deleted file mode 100644
index 55246522f4ef..000000000000
--- a/metering/metering-using-metering.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="using-metering"]
-= Using metering
-include::_attributes/common-attributes.adoc[]
-:context: using-metering
-
-toc::[]
-
-:FeatureName: Metering
-include::modules/deprecated-feature.adoc[leveloffset=+1]
-
-== Prerequisites
-
-* xref:../metering/metering-installing-metering.adoc#metering-install-operator_installing-metering[Install metering]
-* Review the details about the available options that can be configured for a xref:../metering/reports/metering-about-reports.adoc#metering-about-reports[report] and how they function.
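-As a preview of the report-writing module that follows, a `Report` custom resource is short; the following is a minimal sketch using one of the predefined queries, with a hypothetical name and reporting period:
-
-[source,yaml]
----
-apiVersion: metering.openshift.io/v1
-kind: Report
-metadata:
-  name: namespace-cpu-request-example # hypothetical report name
-  namespace: openshift-metering
-spec:
-  query: namespace-cpu-request # one of the predefined ReportQuery resources
-  reportingStart: '2021-01-01T00:00:00Z' # hypothetical reporting period
-  reportingEnd: '2021-12-31T23:59:59Z'
-  runImmediately: true
-- 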
- -include::modules/metering-writing-reports.adoc[leveloffset=+1] - -include::modules/metering-viewing-report-results.adoc[leveloffset=+1] diff --git a/metering/reports/metering-about-reports.adoc b/metering/reports/metering-about-reports.adoc deleted file mode 100644 index f5bde2f08382..000000000000 --- a/metering/reports/metering-about-reports.adoc +++ /dev/null @@ -1,16 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-about-reports"] -= About Reports -include::_attributes/common-attributes.adoc[] -:context: metering-about-reports - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `Report` custom resource provides a method to manage periodic Extract Transform and Load (ETL) jobs using SQL queries. Reports are composed from other metering resources, such as `ReportQuery` resources that provide the actual SQL query to run, and `ReportDataSource` resources that define the data available to the `ReportQuery` and `Report` resources. - -Many use cases are addressed by the predefined `ReportQuery` and `ReportDataSource` resources that come installed with metering. Therefore, you do not need to define your own unless you have a use case that is not covered by these predefined resources. - -include::modules/metering-reports.adoc[leveloffset=+1] diff --git a/metering/reports/metering-storage-locations.adoc b/metering/reports/metering-storage-locations.adoc deleted file mode 100644 index dccfe8e6a580..000000000000 --- a/metering/reports/metering-storage-locations.adoc +++ /dev/null @@ -1,83 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="metering-storage-locations"] -= Storage locations -include::_attributes/common-attributes.adoc[] -:context: metering-storage-locations - -toc::[] - -:FeatureName: Metering -include::modules/deprecated-feature.adoc[leveloffset=+1] - -A `StorageLocation` custom resource configures where data will be stored by the Reporting Operator. This includes the data collected from Prometheus, and the results produced by generating a `Report` custom resource. - -You only need to configure a `StorageLocation` custom resource if you want to store data in multiple locations, like multiple S3 buckets or both S3 and HDFS, or if you wish to access a database in Hive and Presto that was not created by metering. For most users this is not a requirement, and the xref:../../metering/configuring_metering/metering-about-configuring.adoc#metering-about-configuring[documentation on configuring metering] is sufficient to configure all necessary storage components. - -== Storage location examples - -The following example shows the built-in local storage option, and is configured to use Hive. By default, data is stored wherever Hive is configured to use storage, such as HDFS, S3, or a `ReadWriteMany` persistent volume claim (PVC). - -.Local storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: hive - labels: - operator-metering: "true" -spec: - hive: <1> - databaseName: metering <2> - unmanagedDatabase: false <3> ----- - -<1> If the `hive` section is present, then the `StorageLocation` resource will be configured to store data in Presto by creating the table using the Hive server. Only `databaseName` and `unmanagedDatabase` are required fields. -<2> The name of the database within hive. -<3> If `true`, the `StorageLocation` resource will not be actively managed, and the `databaseName` is expected to already exist in Hive. 
If `false`, the Reporting Operator will create the database in Hive. - -The following example uses an AWS S3 bucket for storage. The prefix is appended to the bucket name when constructing the path to use. - -.Remote storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" <1> ----- -<1> Optional: The filesystem URL for Presto and Hive to use for the database. This can be an `hdfs://` or `s3a://` filesystem URL. - -There are additional optional fields that can be specified in the `hive` section: - -* `defaultTableProperties`: Contains configuration options for creating tables using Hive. -* `fileFormat`: The file format used for storing files in the filesystem. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-StorageFormatsStorageFormatsRowFormat,StorageFormat,andSerDe[Hive Documentation on File Storage Format] for a list of options and more details. -* `rowFormat`: Controls the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[ Hive row format]. This controls how Hive serializes and deserializes rows. See the link:https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe[Hive Documentation on Row Formats and SerDe] for more details. - -== Default storage location -If an annotation `storagelocation.metering.openshift.io/is-default` exists and is set to `true` on a `StorageLocation` resource, then that resource becomes the default storage resource. Any components with a storage configuration option where the storage location is not specified will use the default storage resource. There can be only one default storage resource. If more than one resource with the annotation exists, an error is logged because the Reporting Operator cannot determine the default. - -.Default storage example -[source,yaml] ----- -apiVersion: metering.openshift.io/v1 -kind: StorageLocation -metadata: - name: example-s3-storage - labels: - operator-metering: "true" - annotations: - storagelocation.metering.openshift.io/is-default: "true" -spec: - hive: - databaseName: example_s3_storage - unmanagedDatabase: false - location: "s3a://bucket-name/path/within/bucket" ----- diff --git a/networking/k8s_nmstate/k8s-nmstate-observing-node-network-state.adoc b/networking/k8s_nmstate/k8s-nmstate-observing-node-network-state.adoc deleted file mode 100644 index 1469c8f74728..000000000000 --- a/networking/k8s_nmstate/k8s-nmstate-observing-node-network-state.adoc +++ /dev/null @@ -1,14 +0,0 @@ -:_mod-docs-content-type: ASSEMBLY -[id="k8s-nmstate-observing-node-network-state"] -= Observing node network state -include::_attributes/common-attributes.adoc[] -:VirtProductName: OpenShift Container Platform -:context: k8s-nmstate-observing-node-network-state - -toc::[] - -Node network state is the network configuration for all nodes in the cluster. 
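-For example, the modules that follow work with the `NodeNetworkState` resource; a minimal sketch of inspecting it from the CLI, where `node01` is a hypothetical node name:
-
-[source,terminal]
----
-$ oc get nns
-$ oc get nns node01 -o yaml
-- 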
-
-include::modules/virt-viewing-network-state-of-node.adoc[leveloffset=+1]
-
-include::modules/virt-viewing-network-state-of-node-console.adoc[leveloffset=+1]
diff --git a/networking/ovn_kubernetes_network_provider/configuring-gateway.adoc b/networking/ovn_kubernetes_network_provider/configuring-gateway.adoc
deleted file mode 100644
index 1dd3f3a0727e..000000000000
--- a/networking/ovn_kubernetes_network_provider/configuring-gateway.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="configuring-gateway"]
-= Configuring a gateway
-include::_attributes/common-attributes.adoc[]
-:context: configuring-gateway-mode
-
-toc::[]
-
-As a cluster administrator, you can configure the `gatewayConfig` object to manage how external traffic leaves the cluster. You do so by setting the `routingViaHost` parameter to one of the following values:
-
-* `true` means that egress traffic routes through a specific local gateway on the node that hosts the pod. Egress traffic routes through the host, and this traffic applies to the routing table of the host.
-* `false` means that egress traffic routes through a dedicated node, but a group of nodes share the same gateway. Egress traffic does not route through the host. Open vSwitch (OVS) outputs traffic directly to the node IP interface.
-
-include::modules/nwt-configure-egress-routing-policies.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/networking/using-cookies-to-keep-route-statefulness.adoc b/networking/using-cookies-to-keep-route-statefulness.adoc
deleted file mode 100644
index 54dca1975e29..000000000000
--- a/networking/using-cookies-to-keep-route-statefulness.adoc
+++ /dev/null
@@ -1,28 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="using-cookies-to-keep-route-statefulness"]
-= Using cookies to keep route statefulness
-{product-author}
-{product-version}
-:data-uri:
-:icons:
-:experimental:
-:toc: macro
-:toc-title:
-:prewrap!:
-:context: using-cookies-to-keep-route-statefulness
-
-toc::[]
-
-{product-title} provides sticky sessions, which enable stateful application
-traffic by ensuring all traffic hits the same endpoint. However, if the endpoint
-pod terminates, whether through restart, scaling, or a change in configuration,
-this statefulness can disappear.
-
-{product-title} can use cookies to configure session persistence. The router
-selects an endpoint to handle any user requests and creates a cookie for the
-session. The cookie is passed back in the response to the request, and the user
-sends the cookie back with the next request in the session. The cookie tells the
-router which endpoint is handling the session, ensuring that client requests use
-the cookie so that they are routed to the same pod.
-
-include::modules/annotating-a-route-with-a-cookie-name.adoc[leveloffset=+1]
diff --git a/post_installation_configuration/deploy-heterogeneous-configuration.adoc b/post_installation_configuration/deploy-heterogeneous-configuration.adoc
deleted file mode 100644
index 71a3bd7aa489..000000000000
--- a/post_installation_configuration/deploy-heterogeneous-configuration.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-:context: multi-architecture-configuration
-[id="post-install-multi-architecture-configuration"]
-= Configuring a multi-architecture cluster
-include::_attributes/common-attributes.adoc[]
-
-toc::[]
-
-A multi-architecture cluster is a cluster that supports worker machines with different architectures.
You can deploy a multi-architecture cluster by creating an Azure installer-provisioned cluster using the multi-architecture installer binary. For Azure installation, see xref:../installing/installing_azure/installing-azure-customizations.adoc[Installing on Azure with customizations].
-
-[WARNING]
-====
-The multi-architecture clusters Technology Preview feature has limited usability with installing, upgrading, and running payloads.
-====
-
-The following procedures explain how to generate an `arm64` boot image and create an Azure compute machine set with the `arm64` boot image. This adds `arm64` worker nodes to your multi-architecture cluster and deploys the desired number of `arm64` virtual machines (VMs). This section also shows how to upgrade your existing cluster to a multi-architecture cluster. Multi-architecture clusters are only available on Azure installer-provisioned infrastructures with `x86_64` control planes.
-
-:FeatureName: Multi-architecture clusters for {product-title} on Azure installer-provisioned infrastructure installations
-include::snippets/technology-preview.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-creating-arm64-bootimage.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-modify-machine-set.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-* xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc[Creating a compute machine set on Azure]
-include::modules/multi-architecture-upgrade-mirrors.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-import-imagestreams.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc b/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc
deleted file mode 100644
index 7ecb92a38c21..000000000000
--- a/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="ocp-certificates"]
-= Certificate types and descriptions
-include::_attributes/common-attributes.adoc[]
-:context: ocp-certificates
-
-toc::[]
-
-== Certificate validation
-
-{product-title} monitors the cluster certificates that it issues and manages to confirm that they are valid. The {product-title} alerting framework has rules to help identify when a certificate issue is about to occur. These rules consist of the following checks:
-
-* API server client certificate expiration is less than five minutes.
diff --git a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc b/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc
deleted file mode 100644
index 65882c92509c..000000000000
--- a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc
+++ /dev/null
@@ -1,84 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="rosa-persistent-storage-aws-efs-csi"]
-= Setting up the AWS Elastic File System CSI Driver Operator
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: rosa-persistent-storage-aws-efs-csi
-toc::[]
-
-//Content similar to persistent-storage-csi-aws-efs.adoc and osd-persistent-storage-aws-efs-csi.adoc. Modules are reused.
-
-[IMPORTANT]
-====
-This procedure is specific to the Amazon Web Services Elastic File System (AWS EFS) CSI Driver Operator, which is only applicable for {product-title} 4.10 and later versions. 
-
-include::modules/annotating-a-route-with-a-cookie-name.adoc[leveloffset=+1]
diff --git a/post_installation_configuration/deploy-heterogeneous-configuration.adoc b/post_installation_configuration/deploy-heterogeneous-configuration.adoc
deleted file mode 100644
index 71a3bd7aa489..000000000000
--- a/post_installation_configuration/deploy-heterogeneous-configuration.adoc
+++ /dev/null
@@ -1,30 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-:context: multi-architecture-configuration
-[id="post-install-multi-architecture-configuration"]
-= Configuring a multi-architecture cluster
-include::_attributes/common-attributes.adoc[]
-
-toc::[]
-
-A multi-architecture cluster is a cluster that supports worker machines with different architectures. You can deploy a multi-architecture cluster by creating an Azure installer-provisioned cluster by using the multi-architecture installer binary. For Azure installation, see xref:../installing/installing_azure/installing-azure-customizations.adoc[Installing on Azure with customizations].
-
-[WARNING]
-====
-The multi-architecture clusters Technology Preview feature has limited usability when installing, upgrading, and running payloads.
-====
-
-The following procedures explain how to generate an `arm64` boot image and create an Azure compute machine set that uses the `arm64` boot image. This adds `arm64` worker nodes to your multi-architecture cluster and deploys the desired number of `arm64` virtual machines (VMs). This section also shows how to upgrade your existing cluster to a multi-architecture cluster. Multi-architecture clusters are available only on Azure installer-provisioned infrastructures with `x86_64` control planes.
-
-:FeatureName: Multi-architecture clusters for {product-title} on Azure installer-provisioned infrastructure installations
-include::snippets/technology-preview.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-creating-arm64-bootimage.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-modify-machine-set.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-* xref:../machine_management/creating_machinesets/creating-machineset-azure.adoc[Creating a compute machine set on Azure]
-
-include::modules/multi-architecture-upgrade-mirrors.adoc[leveloffset=+1]
-
-include::modules/multi-architecture-import-imagestreams.adoc[leveloffset=+1]
\ No newline at end of file
diff --git a/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc b/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc
deleted file mode 100644
index 7ecb92a38c21..000000000000
--- a/security/certificate_types_descriptions/certificate-types-descriptions-index.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="ocp-certificates"]
-= Certificate types and descriptions
-include::_attributes/common-attributes.adoc[]
-:context: ocp-certificates
-
-toc::[]
-
-== Certificate validation
-
-{product-title} monitors the validity of the cluster certificates that it issues and manages. The {product-title} alerting framework has rules to help identify when a certificate issue is about to occur. These rules consist of the following checks:
-
-* API server client certificate expiration is less than five minutes.
diff --git a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc b/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc
deleted file mode 100644
index 65882c92509c..000000000000
--- a/storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc
+++ /dev/null
@@ -1,84 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="rosa-persistent-storage-aws-efs-csi"]
-= Setting up the AWS Elastic File System CSI Driver Operator
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: rosa-persistent-storage-aws-efs-csi
-
-toc::[]
-
-//Content similar to persistent-storage-csi-aws-efs.adoc and osd-persistent-storage-aws-efs-csi.adoc. Modules are reused.
-
-[IMPORTANT]
-====
-This procedure is specific to the Amazon Web Services Elastic File System (AWS EFS) CSI Driver Operator, which is applicable only for {product-title} 4.10 and later versions.
-====
-
-== Overview
-
-{product-title} can provision persistent volumes (PVs) by using the Container Storage Interface (CSI) driver for AWS Elastic File System (EFS).
-
-Familiarity with link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html-single/storage/index#persistent-storage-overview_understanding-persistent-storage[persistent storage] and link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html-single/storage/index#persistent-storage-csi[configuring CSI volumes] is recommended when working with a CSI Operator and driver.
-
-After you install the AWS EFS CSI Driver Operator, {product-title} installs the AWS EFS CSI Operator and the AWS EFS CSI driver by default in the `openshift-cluster-csi-drivers` namespace. This allows the AWS EFS CSI Driver Operator to create CSI-provisioned PVs that mount to AWS EFS assets.
-
-* The _AWS EFS CSI Driver Operator_, after being installed, does not create a storage class by default for creating persistent volume claims (PVCs). However, you can manually create the AWS EFS `StorageClass`.
-The AWS EFS CSI Driver Operator supports dynamic volume provisioning by allowing storage volumes to be created on demand.
-This eliminates the need for cluster administrators to pre-provision storage.
-
-* The _AWS EFS CSI driver_ enables you to create and mount AWS EFS PVs.
-
-[NOTE]
-====
-AWS EFS supports only regional volumes, not zonal volumes.
-====
-
-include::modules/persistent-storage-csi-about.adoc[leveloffset=+1]
-
-:FeatureName: AWS EFS
-
-include::modules/persistent-storage-csi-olm-operator-install.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#efs-sts_rosa-persistent-storage-aws-efs-csi[Configuring AWS EFS CSI Driver with STS]
-
-include::modules/persistent-storage-csi-efs-sts.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#persistent-storage-csi-olm-operator-install_rosa-persistent-storage-aws-efs-csi[Installing the AWS EFS CSI Driver Operator]
-
-* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html-single/authentication_and_authorization/index#cco-ccoctl-configuring_cco-mode-sts[Configuring the Cloud Credential Operator utility]
-
-:StorageClass: AWS EFS
-:Provisioner: efs.csi.aws.com
-
-include::modules/storage-create-storage-class.adoc[leveloffset=+1]
-
-include::modules/persistent-storage-csi-efs-create-volume.adoc[leveloffset=+1]
-
-include::modules/persistent-storage-csi-dynamic-provisioning-aws-efs.adoc[leveloffset=+1]
-
-If you have problems setting up dynamic provisioning, see xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#efs-troubleshooting_rosa-persistent-storage-aws-efs-csi[AWS EFS troubleshooting].
-
-include::modules/persistent-storage-csi-efs-static-pv.adoc[leveloffset=+1]
-
-If you have problems setting up static PVs, see xref:../../storage/persistent_storage/rosa-persistent-storage-aws-efs-csi.adoc#efs-troubleshooting_rosa-persistent-storage-aws-efs-csi[AWS EFS troubleshooting].
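-
-For reference, a minimal sketch of a statically provisioned PV that mounts an existing AWS EFS file system through the `efs.csi.aws.com` driver; the file system ID is a placeholder:
-
-[source,yaml]
-----
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: efs-pv
-spec:
-  capacity:
-    storage: 5Gi <1>
-  volumeMode: Filesystem
-  accessModes:
-  - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  csi:
-    driver: efs.csi.aws.com
-    volumeHandle: fs-0123456789abcdef0 <2>
-----
-<1> EFS does not enforce capacity, but Kubernetes requires a value; any valid quantity works.
-<2> The ID of an existing AWS EFS file system.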
-
-include::modules/persistent-storage-csi-efs-security.adoc[leveloffset=+1]
-
-include::modules/persistent-storage-csi-efs-troubleshooting.adoc[leveloffset=+1]
-
-:FeatureName: AWS EFS
-
-include::modules/persistent-storage-csi-olm-operator-uninstall.adoc[leveloffset=+1]
-
-[role="_additional-resources"]
-== Additional resources
-
-* link:https://access.redhat.com/documentation/en-us/openshift_container_platform/4.14/html-single/storage/index#persistent-storage-csi[Configuring CSI volumes]
-
diff --git a/support/osd-managed-resources.adoc b/support/osd-managed-resources.adoc
deleted file mode 100644
index d8fef69fd584..000000000000
--- a/support/osd-managed-resources.adoc
+++ /dev/null
@@ -1,54 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="osd-managed-resources"]
-= {product-title} managed resources
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: osd-managed-resources
-
-toc::[]
-
-[id="osd-managed-resources-overview"]
-== Overview
-
-The following sections cover all resources managed or protected by the Service Reliability Engineering Platform (SRE-P) Team. Customers should not attempt to modify these resources because doing so can lead to cluster instability.
-
-[id="osd-managed-resources-all"]
-== Hive managed resources
-
-The following list displays the {product-title} resources managed by OpenShift Hive, the centralized fleet configuration management system. These resources are in addition to the OpenShift Container Platform resources created during installation. OpenShift Hive continually attempts to maintain consistency across all {product-title} clusters. Changes to {product-title} resources should be made through {cluster-manager} so that {cluster-manager} and Hive stay synchronized. Contact ocm-feedback@redhat.com if {cluster-manager} does not support modifying the resources in question.
-
-.List of Hive managed resources
-[%collapsible]
-====
-[source,yaml]
-----
-include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/managed/all-osd-resources.yaml[]
-----
-====
-
-[id="osd-add-on-managed-namespaces"]
-== {product-title} add-on namespaces
-
-{product-title} add-ons are services available for installation after cluster installation. These additional services include AWS CloudWatch, Red Hat CodeReady Workspaces, Red Hat OpenShift API Management, and Cluster Logging Operator. Any changes to resources within the following namespaces might be overridden by the add-on during upgrades, which can lead to unsupported configurations for the add-on functionality.
-
-.List of add-on managed namespaces
-[%collapsible]
-====
-[source,yaml]
-----
-include::https://raw.githubusercontent.com/openshift/managed-cluster-config/master/resources/addons-namespaces/main.yaml[]
-----
-====
-
-[id="osd-validating-webhooks"]
-== {product-title} validating webhooks
-
-{product-title} validating webhooks are a set of dynamic admission controls maintained by the OpenShift SRE team. These HTTP callbacks, also known as webhooks, are called for various types of requests to ensure cluster stability. The webhooks accept or reject each request. The following list describes the various webhooks, along with the rules that define the registered operations and resources that each webhook controls. Any attempt to circumvent these validating webhooks could affect the stability and supportability of the cluster.
-
-.List of validating webhooks
-[%collapsible]
-====
-[source,json]
-----
-include::https://raw.githubusercontent.com/openshift/managed-cluster-validating-webhooks/master/docs/webhooks.json[]
-----
-====
diff --git a/updating/troubleshooting_updates/recovering-update-before-applied.adoc b/updating/troubleshooting_updates/recovering-update-before-applied.adoc
deleted file mode 100644
index 20fd577c0d73..000000000000
--- a/updating/troubleshooting_updates/recovering-update-before-applied.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="recovering-update-before-applied"]
-= Recovering when an update fails before it is applied
-include::_attributes/common-attributes.adoc[]
-:context: troubleshooting_updates
-
-toc::[]
-
-In some situations, you can recover from a failed update of {product-title}.
-
-include::modules/updating-troubleshooting-clear.adoc[leveloffset=+1]
diff --git a/updating/troubleshooting_updates/restoring-cluster-previous-state.adoc b/updating/troubleshooting_updates/restoring-cluster-previous-state.adoc
deleted file mode 100644
index 7f6fd16e3b94..000000000000
--- a/updating/troubleshooting_updates/restoring-cluster-previous-state.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="restoring-cluster-previous-state"]
-= Restoring your cluster to a previous state
-include::_attributes/common-attributes.adoc[]
-:context: restoring-cluster-previous-state
-
-toc::[]
-
-For information on restoring your cluster to a previous state, see xref:../../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[Restoring to a previous cluster state].
\ No newline at end of file
diff --git a/updating/updating-restricted-network-cluster.adoc b/updating/updating-restricted-network-cluster.adoc
deleted file mode 100644
index e61711d29f13..000000000000
--- a/updating/updating-restricted-network-cluster.adoc
+++ /dev/null
@@ -1,216 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="updating-restricted-network-cluster"]
-= Updating a restricted network cluster
-include::_attributes/common-attributes.adoc[]
-:context: updating-restricted-network-cluster
-
-toc::[]
-
-You can update a restricted network {product-title} cluster by using the `oc` command-line interface (CLI) or by using the OpenShift Update Service.
-
-== Updating a restricted network cluster using the CLI
-
-You can update a restricted network {product-title} cluster by using the `oc` command-line interface (CLI).
-
-A restricted network environment is one in which your cluster nodes cannot access the internet. For this reason, you must populate a registry with the installation images. If your registry host cannot access both the internet and the cluster, you can mirror the images to a file system that is disconnected from that environment and then bring that host or removable media across that gap. If the local container registry and the cluster are connected to the mirror registry's host, you can push the release images directly to the local registry.
-
-If multiple clusters are present within the restricted network, mirror the required release images to a single container image registry and use that registry to update all of the clusters.
-
-=== Prerequisites
-
-* Have access to the internet to obtain the necessary container images.
-* Have write access to a container registry in the restricted-network environment to push and pull images.
The container registry must be compatible with Docker registry API v2.
-* Have the `oc` command-line interface (CLI) tool installed.
-* Have access to the cluster as a user with `admin` privileges.
-See xref:../authentication/using-rbac.adoc[Using RBAC to define and apply permissions].
-* Have a recent xref:../backup_and_restore/control_plane_backup_and_restore/backing-up-etcd.adoc#backup-etcd[etcd backup] in case your update fails and you must xref:../backup_and_restore/control_plane_backup_and_restore/disaster_recovery/scenario-2-restoring-cluster-state.adoc#dr-restoring-cluster-state[restore your cluster to a previous state].
-* Ensure that all machine config pools (MCPs) are running and not paused. Nodes associated with a paused MCP are skipped during the update process. You can pause the MCPs if you are performing a canary rollout update strategy.
-* If your cluster uses manually maintained credentials, ensure that the Cloud Credential Operator (CCO) is in an upgradeable state. For more information, see _Upgrading clusters with manually maintained credentials_ for xref:../installing/installing_aws/manually-creating-iam.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-aws[AWS], xref:../installing/installing_azure/manually-creating-iam-azure.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-azure[Azure], or xref:../installing/installing_gcp/manually-creating-iam-gcp.adoc#manually-maintained-credentials-upgrade_manually-creating-iam-gcp[GCP].
-//STS is not currently supported in a restricted network environment, but the following bullet can be uncommented when that changes.
-//* If your cluster uses manually maintained credentials with the AWS Secure Token Service (STS), obtain a copy of the `ccoctl` utility from the release image being upgraded to and use it to process any updated credentials. For more information, see xref:../authentication/managing_cloud_provider_credentials/cco-mode-sts.adoc#sts-mode-upgrading[_Upgrading an OpenShift Container Platform cluster configured for manual mode with STS_].
-* If you run an Operator or have configured any application with a pod disruption budget, you might experience an interruption during the upgrade process. If `minAvailable` is set to 1 in a `PodDisruptionBudget`, the nodes are drained to apply pending machine configs, and the budget might block the eviction process. If several nodes are rebooted, all of the pods might run on only one node, and the `PodDisruptionBudget` can then prevent the node drain, as illustrated by the sketch that follows this list.
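-
-As an illustration of the last item in the preceding list, a minimal sketch of a `PodDisruptionBudget` with `minAvailable` set to 1; all names are placeholders:
-
-[source,yaml]
-----
-apiVersion: policy/v1
-kind: PodDisruptionBudget
-metadata:
-  name: my-app-pdb
-spec:
-  minAvailable: 1 <1>
-  selector:
-    matchLabels:
-      app: my-app
-----
-<1> If the only remaining matching pod runs on the node being drained, this budget blocks its eviction and the drain stalls.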
-
-[id="updating-restricted-network-mirror-host"]
-=== Preparing your mirror host
-
-Before you perform the mirror procedure, you must prepare the host to retrieve content and push it to the remote location.
-
-include::modules/cli-installing-cli.adoc[leveloffset=+3]
-
-// this file doesn't exist, so I'm including the one that should pick up more changes from Clayton's PR - modules/installation-adding-mirror-registry-pull-secret.adoc[leveloffset=+1]
-
-include::modules/installation-adding-registry-pull-secret.adoc[leveloffset=+3]
-
-[id="update-mirror-repository_updating-restricted-network-cluster"]
-=== Mirroring the {product-title} image repository
-
-You must mirror container images onto a mirror registry before you can update a cluster in a restricted network environment. You can also use this procedure in unrestricted networks to ensure that your clusters use only container images that have satisfied your organizational controls on external content.
-
-There are two supported methods for mirroring images onto a mirror registry:
-
-* Using the oc-mirror OpenShift CLI (`oc`) plugin
-
-* Using the `oc adm release mirror` command
-
-Choose one of the following supported options.
-
-include::modules/update-mirror-repository-oc-mirror.adoc[leveloffset=+3]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin]
-
-include::modules/update-mirror-repository.adoc[leveloffset=+3]
-
-include::modules/machine-health-checks-pausing.adoc[leveloffset=+2]
-
-include::modules/update-restricted.adoc[leveloffset=+2]
-
-include::modules/images-configuration-registry-mirror.adoc[leveloffset=+2]
-
-include::modules/generating-icsp-object-scoped-to-a-registry.adoc[leveloffset=+2]
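-
-For reference, a minimal sketch of the kind of `ImageContentSourcePolicy` object that the preceding procedures work with; the mirror registry host and repository paths are placeholders:
-
-[source,yaml]
-----
-apiVersion: operator.openshift.io/v1alpha1
-kind: ImageContentSourcePolicy
-metadata:
-  name: release-mirror
-spec:
-  repositoryDigestMirrors:
-  - mirrors:
-    - mirror.example.com/ocp4/openshift4
-    source: quay.io/openshift-release-dev/ocp-release
-  - mirrors:
-    - mirror.example.com/ocp4/openshift4
-    source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
-----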
-
-[id="additional-resources_security-container-signature"]
-[role="_additional-resources"]
-== Additional resources
-
-* xref:../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks]
-
-* xref:../post_installation_configuration/machine-configuration-tasks.adoc#machine-config-overview-post-install-machine-configuration-tasks[Machine Config Overview]
-
-[id="update-restricted-network-cluster-update-service"]
-== Updating a restricted network cluster using the OpenShift Update Service
-
-include::modules/update-service-overview.adoc[leveloffset=+2]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../updating/understanding-upgrade-channels-release.adoc#understanding-upgrade-channels_understanding-upgrade-channels-releases[Understanding upgrade channels and releases]
-
-For clusters with internet access, Red Hat provides over-the-air updates through an {product-title} update service as a hosted service located behind public APIs. However, clusters in a restricted network have no way to access public APIs for update information.
-
-To provide a similar update experience in a restricted network, you can install and configure the OpenShift Update Service locally so that it is available within a disconnected environment.
-
-The following sections describe how to provide over-the-air updates for your disconnected cluster and its underlying operating system.
-
-[id="update-service-prereqs"]
-=== Prerequisites
-
-* For more information on installing Operators, see xref:../operators/user/olm-installing-operators-in-namespace.adoc#olm-installing-operators-in-namespace[Installing Operators in your namespace].
-
-[id="registry-configuration-for-update-service"]
-=== Configuring access to a secured registry for the OpenShift Update Service
-
-If the release images are contained in a secure registry, complete the steps in xref:../registry/configuring-registry-operator.adoc#images-configuration-cas_configuring-registry-operator[Configuring additional trust stores for image registry access], along with the following changes for the update service.
-
-The OpenShift Update Service Operator needs the config map key name `updateservice-registry` in the registry CA cert.
-
-.Image registry CA config map example for the update service
-[source,yaml]
-----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: my-registry-ca
-data:
-  updateservice-registry: | <1>
-    -----BEGIN CERTIFICATE-----
-    ...
-    -----END CERTIFICATE-----
-  registry-with-port.example.com..5000: | <2>
-    -----BEGIN CERTIFICATE-----
-    ...
-    -----END CERTIFICATE-----
-----
-<1> The OpenShift Update Service Operator requires the config map key name `updateservice-registry` in the registry CA cert.
-<2> If the registry has a port, such as `registry-with-port.example.com:5000`, replace `:` with `..`.
-
-include::modules/images-update-global-pull-secret.adoc[leveloffset=+2]
-
-[id="update-service-install"]
-=== Installing the OpenShift Update Service Operator
-
-To install the OpenShift Update Service, you must first install the OpenShift Update Service Operator by using the {product-title} web console or CLI.
-
-[NOTE]
-====
-For clusters that are installed on restricted networks, also known as disconnected clusters, Operator Lifecycle Manager by default cannot access the Red Hat-provided OperatorHub sources hosted on remote registries because those remote sources require full internet connectivity. For more information, see xref:../operators/admin/olm-restricted-networks.adoc#olm-restricted-networks[Using Operator Lifecycle Manager on restricted networks].
-====
-
-include::modules/update-service-install-web-console.adoc[leveloffset=+2]
-
-include::modules/update-service-install-cli.adoc[leveloffset=+2]
-
-include::modules/update-service-graph-data.adoc[leveloffset=+2]
-
-[id="update-service-mirror-release_updating-restricted-network-cluster"]
-=== Mirroring the {product-title} image repository
-
-You must mirror container images onto a mirror registry before you can update a cluster in a restricted network environment. You can also use this procedure in unrestricted networks to ensure that your clusters use only container images that have satisfied your organizational controls on external content.
-
-There are two supported methods for mirroring images onto a mirror registry:
-
-* Using the oc-mirror OpenShift CLI (`oc`) plugin
-
-* Using the `oc adm release mirror` command
-
-Choose one of the following supported options.
-
-//The module below is being used twice in this assembly, so this instance needs to have a unique context set in order for its ID to be unique. In the future, if and when the two main sections of this webpage are split into their own assemblies/pages, the context attributes below can be removed.
-
-:!context:
-:context: osus-restricted-network-cluster
-
-include::modules/update-mirror-repository-oc-mirror.adoc[leveloffset=+3]
-
-[role="_additional-resources"]
-.Additional resources
-
-* xref:../installing/disconnected_install/installing-mirroring-disconnected.adoc#installing-mirroring-disconnected[Mirroring images for a disconnected installation using the oc-mirror plugin]
-
-:!context:
-:context: updating-restricted-network-cluster
-
-include::modules/update-service-mirror-release.adoc[leveloffset=+3]
-
-[id="update-service-create-service"]
-=== Creating an OpenShift Update Service application
-
-You can create an OpenShift Update Service application by using the {product-title} web console or CLI.
-
-include::modules/update-service-create-service-web-console.adoc[leveloffset=+3]
-
-include::modules/update-service-create-service-cli.adoc[leveloffset=+3]
-
-[NOTE]
-====
-The policy engine route name must not be more than 63 characters, based on RFC 1123. If you see the `ReconcileCompleted` status as `false` with the reason `CreateRouteFailed` caused by `host must conform to DNS 1123 naming convention and must be no more than 63 characters`, try creating the Update Service application with a shorter name.
-====
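-
-For reference, a minimal sketch of an `UpdateService` custom resource of the kind that the preceding procedures create; the name and registry paths are placeholders, and the field names follow the Operator's published examples:
-
-[source,yaml]
-----
-apiVersion: updateservice.operator.openshift.io/v1
-kind: UpdateService
-metadata:
-  name: service <1>
-spec:
-  replicas: 2
-  releases: registry.example.com/ocp4/release-images <2>
-  graphDataImage: registry.example.com/openshift/graph-data:latest <3>
-----
-<1> Keep the name short; it contributes to the policy engine route name described in the preceding note.
-<2> The repository that the release images were mirrored to.
-<3> The graph data container image created in the earlier step.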
-
-include::modules/update-service-configure-cvo.adoc[leveloffset=+3]
-
-[NOTE]
-====
-See xref:../networking/enable-cluster-wide-proxy.adoc#nw-proxy-configure-object[Enabling the cluster-wide proxy] to configure the CA to trust the update server.
-====
-
-[id="update-service-delete-service"]
-=== Deleting an OpenShift Update Service application
-
-You can delete an OpenShift Update Service application by using the {product-title} web console or CLI.
-
-include::modules/update-service-delete-service-web-console.adoc[leveloffset=+3]
-
-include::modules/update-service-delete-service-cli.adoc[leveloffset=+3]
-
-[id="update-service-uninstall"]
-=== Uninstalling the OpenShift Update Service Operator
-
-To uninstall the OpenShift Update Service, you must first delete all OpenShift Update Service applications by using the {product-title} web console or CLI.
-
-include::modules/update-service-uninstall-web-console.adoc[leveloffset=+3]
-
-include::modules/update-service-uninstall-cli.adoc[leveloffset=+3]
diff --git a/welcome/about-hcp.adoc b/welcome/about-hcp.adoc
deleted file mode 100644
index 4c3aeff28fdb..000000000000
--- a/welcome/about-hcp.adoc
+++ /dev/null
@@ -1,100 +0,0 @@
-:_mod-docs-content-type: ASSEMBLY
-[id="about-hcp"]
-= Learn more about ROSA with HCP
-include::_attributes/common-attributes.adoc[]
-include::_attributes/attributes-openshift-dedicated.adoc[]
-:context: about-hcp
-
-toc::[]
-
-{hcp-title-first} offers a reduced-cost solution for creating a managed ROSA cluster, with a focus on efficiency. You can quickly create a new cluster and deploy applications in minutes.
-
-== Key features of {hcp-title}
-
-* {hcp-title} requires a minimum of only two nodes, making it ideal for smaller projects while still being able to scale to support larger projects and enterprises.
-
-* The underlying control plane infrastructure is fully managed. Control plane components, such as the API server and etcd database, are hosted in a Red Hat-owned AWS account.
-
-* Provisioning time is approximately 10 minutes.
-
-* Customers can upgrade the control plane and machine pools separately, which means they do not have to shut down the entire cluster during upgrades.
-
-== Getting started with {hcp-title}
-
-Use the following sections to find content to help you learn about and use {hcp-title}.
-
-[id="architect"]
-=== Architect
-
-[options="header",cols="3*"]
-|===
-| Learn about {hcp-title} |Plan {hcp-title} deployment |Additional resources
-
-| xref:../architecture/index.adoc#architecture-overview[Architecture overview]
-| xref:../rosa_backing_up_and_restoring_applications/backing-up-applications.adoc#rosa-backing-up-applications[Back up and restore]
-| xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-life-cycle.adoc#rosa-hcp-life-cycle[{hcp-title} life cycle]
-
-| xref:../architecture/rosa-architecture-models.adoc#rosa-architecture-models[{hcp-title} architecture]
-|
-| xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-service-definition.adoc#rosa-hcp-service-definition[{hcp-title} service definition]
-
-|
-|
-| xref:../support/index.adoc#support-overview[Getting support]
-|===
-
-
-[id="cluster-administrator"]
-=== Cluster Administrator
-
-[options="header",cols="4*"]
-|===
-|Learn about {hcp-title} |Deploy {hcp-title} |Manage {hcp-title} |Additional resources
-
-| xref:../architecture/rosa-architecture-models.adoc#rosa-architecture-models[{hcp-title} architecture]
-| xref:../rosa_hcp/rosa-hcp-sts-creating-a-cluster-quickly.adoc#rosa-hcp-sts-creating-a-cluster-quickly[Installing {hcp-title}]
-| xref:../observability/logging/cluster-logging.adoc#cluster-logging[Logging]
-| xref:../support/index.adoc#support-overview[Getting support]
-
-| link:https://learn.openshift.com/?extIdCarryOver=true&sc_cid=701f2000001Css5AAC[OpenShift Interactive Learning Portal]
-| xref:../storage/index.adoc#storage-overview[Storage]
-| xref:../observability/monitoring/monitoring-overview.adoc#monitoring-overview_virt-monitoring-overview[Monitoring overview]
-| xref:../rosa_architecture/rosa_policy_service_definition/rosa-hcp-life-cycle.adoc#rosa-hcp-life-cycle[{hcp-title} life cycle]
-
-|
-| xref:../rosa_backing_up_and_restoring_applications/backing-up-applications.adoc#rosa-backing-up-applications[Back up and restore]
-|
-|
-
-|
-| xref:../upgrading/rosa-hcp-upgrading.adoc#rosa-hcp-upgrading[Upgrading]
-|
-|
-
-|===
-
-
-[id="developer"]
-=== Developer
-
-[options="header",cols="3*"]
-|===
-|Learn about application development in {hcp-title} |Deploy applications |Additional resources
-
-| link:https://developers.redhat.com/[Red Hat Developers site]
-| xref:../applications/index.adoc#building-applications-overview[Building applications overview]
-| xref:../support/index.adoc#support-overview[Getting support]
-
-| link:https://developers.redhat.com/products/openshift-dev-spaces/overview[{openshift-dev-spaces-productname} (formerly Red Hat CodeReady Workspaces)]
-| xref:../operators/index.adoc#operators-overview[Operators overview]
-|
-
-|
-| xref:../openshift_images/index.adoc#overview-of-images[Images]
-|
-
-|
-| xref:../cli_reference/odo-important-update.adoc#odo-important_update[Developer-focused CLI]
-|
-
-|===