diff --git a/pkg/harvester/config/harvester-map.js b/pkg/harvester/config/harvester-map.js index bf49cf949a2..02cbfe25063 100644 --- a/pkg/harvester/config/harvester-map.js +++ b/pkg/harvester/config/harvester-map.js @@ -23,7 +23,7 @@ export const InterfaceOption = [{ export const SOURCE_TYPE = { NEW: 'New', - IMAGE: 'VM Image', + IMAGE: 'Virtual Machine Image', ATTACH_VOLUME: 'Existing Volume', CONTAINER: 'Container' }; diff --git a/pkg/harvester/config/harvester.js b/pkg/harvester/config/harvester.js index 4cb4ecee538..63774d06d82 100644 --- a/pkg/harvester/config/harvester.js +++ b/pkg/harvester/config/harvester.js @@ -37,8 +37,8 @@ import { import { IF_HAVE } from '@shell/store/type-map'; const TEMPLATE = HCI.VM_VERSION; -const MONITORING_GROUP = 'Monitoring & Logging::Monitoring'; -const LOGGING_GROUP = 'Monitoring & Logging::Logging'; +const MONITORING_GROUP = 'Monitoring and Logging::Monitoring'; +const LOGGING_GROUP = 'Monitoring and Logging::Logging'; export const PRODUCT_NAME = 'harvester'; diff --git a/pkg/harvester/l10n/en-us.yaml b/pkg/harvester/l10n/en-us.yaml index 62dd4271cc5..b8efc9b3df7 100644 --- a/pkg/harvester/l10n/en-us.yaml +++ b/pkg/harvester/l10n/en-us.yaml @@ -10,10 +10,10 @@ generic: nav: group: networks: Networks - backupAndSnapshot: Backup & Snapshot + backupAndSnapshot: Backup and Snapshots Monitoring: Monitoring Logging: Logging - 'Monitoring & Logging': Monitoring & Logging + 'Monitoring and Logging': Monitoring and Logging resourceTable: groupBy: @@ -28,7 +28,7 @@ members: asyncButton: restart: - action: Save & Restart + action: Save and Restart success: Restarted waiting: Restarting… @@ -40,7 +40,7 @@ harvester: addBackup: Add Backup restore: success: 'Restore { name } created successfully.' - title: Backup and restore + title: Backup and Restore selectBackup: Select Backup message: backup: Please select the backup that needs to be restored. @@ -53,16 +53,16 @@ harvester: success: 'Template { templateName } created successfully.' 
failed: 'Failed generated template!' cloneVM: - title: Clone VM - name: New VM Name + title: Clone Virtual Machine + name: New Virtual Machine Name type: Clone volume data action: create: Create clone: Clone message: - tip: Please enter a VM name! - success: 'VM { name } cloned successfully.' - failed: 'Failed clone VM!' + tip: Please enter a virtual machine name! + success: 'Virtual machine { name } cloned successfully.' + failed: 'Failed clone virtual machine!' exportImage: title: Export to Image name: Name @@ -77,21 +77,21 @@ harvester: label: Target Node placeholder: Choose Target Node ejectCDROM: - title: Eject CDROM + title: Eject CD-ROM warnTip: Eject volume will restart the virtual machine. operationTip: 'Select the volume you want to delete:' delete: Delete bundle: - title: Generate Support Bundle + title: Generate a Support Bundle url: Issue URL description: Description requiredDesc: Description is required! - titleDescription: Collect system-related log in Harvester, to help with troubleshooting and support. + titleDescription: Collect system-related logs in Harvester to help with troubleshooting and support. hotplug: - success: 'Volume { diskName } is mounted to the VM { vm }.' + success: 'Volume { diskName } is mounted to the virtual machine { vm }.' title: Add Volume hotunplug: - success: 'Volume { name } is detach successfully.' + success: 'Volume { name } is detached successfully.' snapshot: title: Take Snapshot name: Name @@ -109,10 +109,10 @@ harvester: vmSnapshot: title: Take VM Snapshot name: Name - success: 'Take VM Snapshot { name } successfully.' + success: 'Take virtual machine Snapshot { name } successfully.' restart: - title: Restart VM - tip: Restart the Virtual Machine now for configuration changes to take effect. + title: Restart Virtual Machine + tip: Restart the virtual machine for configuration changes to take effect. 
cancel: Save notification: title: @@ -130,11 +130,11 @@ harvester: deepClone: Clone shallowClone: Clone Template unpause: Unpause - ejectCDROM: Eject CDROM + ejectCDROM: Eject CD-ROM launchFormTemplate: Launch instance from template modifyTemplate: Modify template (Create new version) setDefaultVersion: Set default version - addTemplateVersion: Add templateVersion + addTemplateVersion: Add template version backup: Take Backup restore: Restore restoreNewVM: Restore New @@ -152,7 +152,7 @@ harvester: cancelExpand: Cancel Expand snapshot: Take Snapshot pvcClone: Clone Volume - vmSnapshot: Take VM Snapshot + vmSnapshot: Take Virtual Machine Snapshot shutdown: Shut Down powerOn: Power On reboot: Reboot @@ -163,13 +163,13 @@ harvester: progress: Progress message: Message phase: Phase - attachedVM: Attached VM + attachedVM: Attached Virtual Machine fingerprint: Fingerprint value: Value actions: Actions readyToUse: Ready To Use backupTarget: Backup Target - targetVm: Target VM + targetVm: Target Virtual Machine hostIp: Host IP vm: ipAddress: IP Address @@ -177,7 +177,7 @@ harvester: defaultVersion: Default Version network: type: Type - vlan: Vlan ID + vlan: VLAN ID snapshotTargetVolume: Original Volume volumeSnapshotCounts: Snapshot Counts networkState: Network State @@ -189,8 +189,8 @@ harvester: advanced: Advanced Options accessCredentials: Access Credentials pciDevices: PCI Devices - vGpuDevices: VGPU Devices - vmScheduling: VM Scheduling + vGpuDevices: vGPU Devices + vmScheduling: Virtual Machine Scheduling instanceLabel: Instance Labels fields: version: Version @@ -201,24 +201,24 @@ harvester: volume: Volume network: Network model: Model - macAddress: Mac Address + macAddress: MAC address port: Port protocol: Protocol - remove: REMOVE + remove: Remove PhysicalNic: Physical NIC - cpu: Cpu + cpu: CPU memory: Memory virtualName: Virtual machine name promiscuous: Promiscuous - ipv4Address: IPv4 Address - filterLabels: Filter Labels - storageClass: Storage Class - 
dockerImage: Docker Image + ipv4Address: IPv4 address + filterLabels: Filter labels + storageClass: Storage class + dockerImage: Docker image pci: available: Available Devices compatibleNodes: Compatible Nodes impossibleSelection: 'There are no hosts with all of the selected devices.' - howToUseDevice: 'Use the table below to enable PCI passthrough on each device you want to use in this VM.' + howToUseDevice: 'Use the table below to enable PCI passthrough on each device you want to use in this virtual machine.' deviceInTheSameHost: 'You can only select devices on the same host.' oldFormatDevices: help: |- @@ -229,11 +229,11 @@ harvester: {oldFormatDevicesHTML}
- Please use the following instructions to update the VM: + Please use the following instructions to update the virtual machine:
hostDevices
section, and save VM the changes to the YAML file.hostDevices
section, and save the virtual machine changes to the YAML file.raw
and qcow2
image formats which are supported by qemu. Bootable ISO images can also be used and are treated like raw
images.'
+ urlTip: 'Supports the raw
and qcow2
image formats which are supported by qemu. Bootable ISO images can also be used and are treated like raw
images.'
fileName: File Name
uploadFile: Upload File
source: Source
@@ -726,10 +726,10 @@ harvester:
tips:
notExistImage:
title: Image {name} does not exist!
- message: Please select a new Image.
+ message: Please select a new image.
notExistNode:
title: Node {name} does not exist!
- message: Please select a new Node.
+ message: Please select a new node.
upgradePage:
upgradeApp: Upgrade Software
@@ -746,8 +746,8 @@ harvester:
selectExisting: Select Existing Image
createRepository: Creating Upgrade Repository
succeeded: Succeeded
- releaseTip: Please read the upgrade documentation carefully. You can view details on the Harvester Release Note.
- checkReady: I have read and understood the upgrade content related to this Harvester version.
+ releaseTip: Please read the upgrade documentation carefully. You can view details on the Harvester Release Notes.
+ checkReady: I have read and understood the upgrade instructions related to this Harvester version.
pending: Pending
repoInfo:
upgradeStatus: Upgrade Status
@@ -757,7 +757,7 @@ harvester:
harvesterChart: Harvester Chart
success: Success
fail: Fail
- ongoing: on-going
+ ongoing: On-going
downloadLog: Download Log
logStatus: Log Download Status
dismissMessage: Dismiss it
@@ -765,21 +765,21 @@ harvester:
warning: WARNING
doc: Read the documentation before starting the upgrade process. Ensure that you complete procedures that are relevant to your environment and the version you are upgrading to.
tip: Unmet system requirements and incorrectly performed procedures may cause complete upgrade failure and other issues that require manual workarounds.
- moreNotes: For more details about the release notes, please visit -
+ moreNotes: For more details about the release notes, please visit -
backup:
- label: VM Backups
+ label: Virtual Machine Backups
createText: Restore Backup
title: Restore Virtual Machine
backupTargetTip: The endpoint used to access the backupstore. NFS and S3 are supported.
message:
noSetting:
- prefix: You must configure the backup target in
+ prefix: You must configure the backup target
middle: 'setting'
suffix: before creating a new backup.
errorTip:
- prefix: Backup Target value in
- middle: Setting
+ prefix: Backup target value in
+ middle: setting
suffix: "is invalid, error: "
viewSetting:
prefix: Click
@@ -798,7 +798,7 @@ harvester:
virtualMachineName: Virtual Machine Name
keepMacAddress: Keep MAC Address
matchTarget: The current backup target does not match the existing one.
- progress:
+ progress:
details: Volume details
tooltip:
starting: Backup initiating
@@ -814,21 +814,21 @@ harvester:
complete: Restore completed
network:
- label: VM Networks
+ label: Virtual Machine Networks
tabs:
basics: Basics
layer3Network: Route
clusterNetwork:
label: Cluster Network
- create: Create a New Cluster Network
+ create: Create a new cluster network
toolTip: Define your custom cluster scope network name
- createPlaceholder: Input a new Cluster Network name
- selectOrCreatePlaceholder: Select or Create a new Cluster Network
- selectPlaceholder: Select a Cluster Network
+ createPlaceholder: Input a new cluster network name
+ selectOrCreatePlaceholder: Select or create a new cluster network
+ selectPlaceholder: Select a cluster network
layer3Network:
mode:
label: Mode
- auto: Auto(DHCP)
+ auto: Auto (DHCP)
manual: Manual
serverIPAddr:
label: DHCP Server IP
@@ -852,9 +852,9 @@ harvester:
validation:
physicalNIC: DefaultPhysicalNIC
placeholder:
- accessKeyId: specify your access key id
- secretAccessKey: specify your secret access key
- cert: upload a self-signed SSL certificate
+ accessKeyId: Specify your access key ID
+ secretAccessKey: Specify your secret access key
+ cert: Upload a self-signed SSL certificate
vlanChangeTip: The newly modified default network interface only applies to newly added nodes, not existing ones.
defaultPhysicalNIC: Default Network Interface
percentTip: The value in parentheses represents the distribution percentage of the network interface on all hosts. If an interface less than 100% is selected, the user needs to manually specify the network interface on the host where the vlan network configuration fails.
@@ -879,7 +879,7 @@ harvester:
placeholder: e.g. 172.16.0.1/32
invalid: '"Exclude list" is invalid.'
addIp: Add Exclude IP
- warning: 'WARNING: Number of IPs Required = Number of Nodes * 4 + Number of Disks * 2 + Number of Images to Download/Upload
. For more information about storage network settings, see the documentation.'
vmForceDeletionPolicy:
period: Period
@@ -917,7 +917,7 @@ harvester:
upgrade:
selectExitImage: Please select the OS image to upgrade.
- imageUrl: Please input a valid image url.
+ imageUrl: Please input a valid image URL.
chooseFile: Please select to upload an image.
checksum: Checksum
harvesterMonitoring:
@@ -933,16 +933,16 @@ harvester:
retention: How long to retain metrics
retentionSize: Maximum size of metrics
clusterRegistrationUrl:
- message: To completely unset the imported Harvester cluster, please also remove it on the Rancher dashboard UI via the Virtualization Management
page.
+ message: To completely unset the imported Harvester cluster, please also remove it on the Rancher Dashboard UI via the Virtualization Management
page.
ntpServers:
isNotIPV4: The address you entered is not IPv4 or host. Please enter a valid IPv4 address or a host address.
isDuplicate: There are duplicate NTP server configurations.
cloudTemplate:
- label: Cloud Config Templates
+ label: Cloud Configuration Templates
templateType: Template Type
userData: User Data
networkData: Network Data
-
+
support:
title: Harvester Support
kubeconfig:
@@ -971,7 +971,7 @@ harvester:
cidr:
label: CIDR/IP Range
invalid: '"CIDR/IP Range" is invalid.'
- toolTip: "We can apply multiple pools or ranges by seperating them with commas. i.e. 192.168.0.200/30,192.168.0.200/29 or 192.168.0.10-192.168.0.11"
+ toolTip: "We can apply multiple pools or ranges by separating them with commas. For example: 192.168.0.200/30,192.168.0.200/29 or 192.168.0.10-192.168.0.11"
add:
label: Add IP Pools
@@ -980,22 +980,22 @@ harvester:
label: Protocols
ciphers:
label: Ciphers
-
+
monitoring:
configuration:
label: Configuration
alertmanagerConfig:
label: Alertmanager Configs
diabledMonitoringTips:
- prefix: 'You must enable'
- middle: 'Monitoring'
- suffix: 'addon at first.'
+ prefix: 'Enable the'
+ middle: 'monitoring'
+ suffix: 'add-on first.'
diabledAlertingTips:
- prefix: 'You must enable'
+ prefix: 'Enable'
middle: 'Alertmanager'
- suffix: 'for configs to take effect.'
+ suffix: 'for configuration to take effect.'
disabledAddon:
- prefix: 'Monitoring Addon is disabled now, click'
+ prefix: 'The monitoring add-on is disabled, click'
middle: 'here'
suffix: 'to enable it.'
@@ -1013,9 +1013,9 @@ harvester:
output:
label: Output
diabledTips:
- prefix: 'You must enable'
- middle: 'Logging'
- suffix: 'for configs to take effect.'
+ prefix: 'Enable'
+ middle: 'logging'
+ suffix: 'for configuration to take effect.'
snapshot:
label: Volume Snapshots
@@ -1024,7 +1024,7 @@ harvester:
image: Image
vmSnapshot:
- label: VM Snapshots
+ label: Virtual Machine Snapshots
createText: Restore Snapshot
snapshot: Snapshot
@@ -1046,7 +1046,7 @@ harvester:
title: Storage Classes
customize:
volumeBindingMode:
- later: Bind and provision a persistent volume once a VM using the PersistentVolumeClaim is created
+ later: Bind and provision a persistent volume once a virtual machine using the PersistentVolumeClaim is created
parameters:
numberOfReplicas:
label: Number Of Replicas
@@ -1062,11 +1062,11 @@ harvester:
label: Migratable
allowedTopologies:
title: Allowed Topologies
- tooltip: Allowed Topologies helps scheduling VMs on hosts which match all of below expressions.
+        tooltip: Allowed Topologies helps schedule virtual machines on hosts that match all of the expressions below.
vlanConfig:
- title: Network Configs
- createNetworkConfig: Create Network Config
+ title: Network Configuration
+ createNetworkConfig: Create Network Configuration
action:
migrate: Migrate
titles:
@@ -1088,7 +1088,7 @@ harvester:
validate:
available: NIC "{nic}" is not available on the selected nodes
linkAttributes:
- mtu:
+ mtu:
label: MTU
bondOptions:
mode:
@@ -1107,39 +1107,39 @@ harvester:
vlanStatus:
vlanConfig:
- label: Network Config
-
+ label: Network Configuration
+
clusterNetwork:
- title: Cluster Networks/Configs
+ title: Cluster Network Configuration
create:
button:
- label: Create Cluster Network
- clusterNetwork: There are no network configs defined.
+ label: Create a Cluster Network
+ clusterNetwork: There are no network configurations defined.
mgmt: mgmt is a built-in cluster management network and does not support any additional network configurations.
notExist: 'Cluster Network "{ clusterNetwork }" does not exist'
notReady: 'Cluster Network "{ clusterNetwork }" is not ready'
-
+
addons:
descriptions:
- 'harvester-system/vm-import-controller': vm-import-controller is an addon to help migrate VM workloads from other source clusters to an existing Harvester cluster.
- 'harvester-system/pcidevices-controller': pcidevices-controller is an addon to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester VM and guest Clusters.
- 'cattle-logging-system/rancher-logging': rancher-logging is an addon to collect versatile logs, events and audits from the Harvester cluster and route them to many kinds of servers based on flows.
- 'harvester-system/rancher-vcluster': rancher-vcluster deploys a vcluster with rancher installed.
- 'cattle-monitoring-system/rancher-monitoring': rancher-monitoring is an addon to collect Harvester cluster and VM metrics, view them on the embedded dashboard, and send alert(s) to remote servers.
- 'vm-import-controller': vm-import-controller is an addon to help migrate VM workloads from other source clusters to an existing Harvester cluster.
- 'pcidevices-controller': pcidevices-controller is an addon to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester VM and guest Clusters.
- 'nvidia-driver-toolkit': 'nvidia-driver-toolkit is an addon to enable vGPU devices and assign them to Harvester VMs.'
- 'rancher-logging': rancher-logging is an addon to collect versatile logs, events and audits from the Harvester cluster and route them to many kinds of servers based on flows.
- 'rancher-monitoring': rancher-monitoring is an addon to collect Harvester cluster and VM metrics, view them on the embedded dashboard, and send alert(s) to remote servers.
- 'rancher-vcluster': rancher-vcluster deploys a vcluster with rancher installed.
- 'harvester-seeder': harvester-seeder is an addon that uses ipmi and redfish to discover hardware information and perform out-of-band operations.
- 'harvester-system/harvester-seeder': harvester-seeder is an addon that uses ipmi and redfish to discover hardware information and perform out-of-band operations.
+ 'harvester-system/vm-import-controller': vm-import-controller is an add-on to help migrate virtual machine workloads from other source clusters to an existing Harvester cluster.
+ 'harvester-system/pcidevices-controller': pcidevices-controller is an add-on to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester virtual machine and guest clusters.
+ 'cattle-logging-system/rancher-logging': rancher-logging is an add-on to collect versatile logs, events, and audits from the Harvester cluster and route them to many kinds of servers based on flows.
+ 'harvester-system/rancher-vcluster': rancher-vcluster deploys a virtual cluster (vcluster) with Rancher installed.
+ 'cattle-monitoring-system/rancher-monitoring': rancher-monitoring is an add-on that collects Harvester cluster and virtual machine metrics and allows you to view the metrics on an embedded dashboard and send alert(s) to remote servers.
+ 'vm-import-controller': vm-import-controller is an add-on to help migrate virtual machine workloads from other source clusters to an existing Harvester cluster.
+ 'pcidevices-controller': pcidevices-controller is an add-on to help discover PCI devices for nodes in your cluster and allow users to prepare devices for PCI Passthrough, for use with Harvester virtual machines and guest clusters.
+ 'nvidia-driver-toolkit': 'nvidia-driver-toolkit is an add-on to enable vGPU devices and assign them to Harvester virtual machines.'
+ 'rancher-logging': rancher-logging is an add-on to collect versatile logs, events, and audits from the Harvester cluster and route them to many kinds of servers based on flows.
+ 'rancher-monitoring': rancher-monitoring is an add-on to collect Harvester cluster and virtual machine metrics, view them on the embedded dashboard, and send alert(s) to remote servers.
+ 'rancher-vcluster': rancher-vcluster deploys a virtual cluster (vcluster) with Rancher installed.
+ 'harvester-seeder': harvester-seeder is an add-on that uses IPMI and Redfish to discover hardware information and perform out-of-band operations.
+ 'harvester-system/harvester-seeder': harvester-seeder is an add-on that uses IPMI and Redfish to discover hardware information and perform out-of-band operations.
vmImport:
titles:
basic: Basic
pvc: Volume
rancherVcluster:
- accessRancher: Access Rancher Dashboard
+ accessRancher: Access the Rancher Dashboard
hostname: Hostname
rancherVersion: Rancher Version
password: Bootstrap Password
@@ -1200,12 +1200,12 @@ harvester:
label: Backend Servers
healthCheck:
warning:
- portInUse: Warning, Backend Port {port} is in use in Health Check settings; in case of updating the port, update the Health Check settings accordingly.
+ portInUse: Warning, the Backend Port {port} is in use in Health Check settings. If you need to update the port, update the Health Check settings accordingly.
ipPool:
label: IP Pools
network:
- label: VM Network
+ label: Virtual Machine Network
tabs:
range: Range
scope: Scope
@@ -1235,13 +1235,13 @@ harvester:
addLabel: Add CIDR
range:
addLabel: Add Range
-
+
service:
healthCheckPort:
label: Health Check Port
healthCheckSuccessThreshold:
label: Health Check Success Threshold
- description: If the number of times the prober continuously detects an address successfully reaches the success threshold, then the backend server can start to forward traffic.
+ description: If the number of times the probe continuously detects an address successfully reaches the success threshold, then the backend server can start to forward traffic.
healthCheckFailureThreshold:
label: Health Check Failure Threshold
description: The backend server will stop forwarding traffic if the number of health check failures reaches the failure threshold.
@@ -1266,22 +1266,22 @@ harvester:
sriovgpu:
label: SR-IOV GPU Devices
nodeName: Node
- numVFs: Number Of Virtual Functions
+ numVFs: Number of Virtual Functions
vfAddresses: Virtual Functions Addresses
vGpuDevices: vGPU Devices
showMore: Show More
parentSriov: Filter By Parent SR-IOV GPU
- noPermission: Please contact system admin to add Harvester addons first
+      noPermission: Please contact your system administrator to add Harvester add-ons first.
goSetting:
- prefix: The nvidia-driver-toolkit addon is not enabled, click
+ prefix: The nvidia-driver-toolkit add-on is not enabled, click
middle: here
suffix: to enable it to manage your SR-IOV GPU devices.
vgpu:
label: vGPU Devices
- noPermission: Please contact system admin to add Harvester addons first
+      noPermission: Please contact your system administrator to add Harvester add-ons first.
goSetting:
- prefix: The nvidia-driver-toolkit addon is not enabled, click
+ prefix: The nvidia-driver-toolkit add-on is not enabled, click
middle: here
suffix: to enable it to manage your vGPU devices.
enableGroup: Enable Group
@@ -1292,22 +1292,22 @@ harvester:
available: Available Devices
compatibleNodes: Compatible Nodes
impossibleSelection: 'There are no hosts with all of the selected devices.'
- howToUseDevice: 'Use the table below to enable vGPU devices you want to use in this VM.'
+ howToUseDevice: 'Use the table below to enable vGPU devices you want to use in this virtual machine.'
deviceInTheSameHost: 'You can only select devices on the same host.'
harvesterVlanConfigMigrateDialog:
- targetClusterNetwork:
+ targetClusterNetwork:
label: Target Cluster Network
placeholder: Choose Target Cluster Network
seeder:
banner:
enable:
- prefix: Addon "harvester-seeder" is disabled now,
- middle: click here
+ prefix: The "harvester-seeder" add-on is disabled.
+ middle: Click here
suffix: to enable it.
- noAccess: Please contact system admin to enable the Out-of-Band Access first.
- noAddon: Addon "harvester-seeder" is not exist, please check if it is installed.
+ noAccess: Please contact your system administrator to enable the Out-of-Band Access first.
+ noAddon: The "harvester-seeder" add-on does not exist, please check if it is installed.
noInventory: Waiting for "inventories.metal.harvesterhci.io" to be ready.
inventory:
host:
@@ -1328,10 +1328,10 @@ harvester:
label: Polling Interval
affinity:
- thisPodNamespace: This VM's namespace
+ thisPodNamespace: This virtual machine's namespace
matchExpressions:
inNamespaces: "Workloads in these namespaces"
- vmAffinityTitle: VM Scheduling
+ vmAffinityTitle: Virtual Machine Scheduling
namespaces:
placeholder: e.g. default,system,base
label: Namespaces
@@ -1343,35 +1343,35 @@ advancedSettings:
technicalPreview: 'Technical Previews allow users to test and evaluate early-access functionality prior to official supported releases'
descriptions:
'harv-vlan': Default Network Interface name of the VLAN network.
- 'harv-backup-target': Custom backup target to store VM backups.
- 'branding': Branding allows administrators to globally re-brand the UI by customizing the Harvester product name, logos and color scheme.
- 'harv-csi-driver-config': Configure additional information for csi drivers.
+ 'harv-backup-target': Custom backup target to store virtual machine backups.
+ 'branding': Branding allows administrators to globally re-brand the UI by customizing the Harvester product name, logos, and color scheme.
+ 'harv-csi-driver-config': Configure additional information for CSI drivers.
'harv-containerd-registry': Containerd Registry Configuration to connect private registries.
- 'harv-log-level': Configure Harvester server log level. Default to info.
+ 'harv-log-level': Configure Harvester server log level. Defaults to Info.
'harv-server-version': Harvester server version.
- 'harv-upgrade-checker-enabled': Specify whether to enable Harvester upgrade check or not. Default is true.
- 'harv-upgrade-checker-url': Default Harvester upgrade check url. Only used when the upgrade-checker-enabled
is equal to true.
- 'harv-ui-source': Config how to load the UI source.
+ 'harv-upgrade-checker-enabled': Specifies whether to enable Harvester upgrade check or not. Default is True.
+    'harv-upgrade-checker-url': Default Harvester upgrade check URL. Only used when the upgrade-checker-enabled
is equal to True.
+ 'harv-ui-source': Configure how to load the UI source.
'harv-ui-index': 'HTML index location for the UI.'
- 'harv-ui-plugin-index': 'JS index location for the harvester plugin UI.'
- 'harv-cluster-registration-url': Registration URL for mutil-cluster management.
+ 'harv-ui-plugin-index': 'JS index location for the Harvester plugin UI.'
+ 'harv-cluster-registration-url': Registration URL for multi-cluster management.
'harv-http-proxy': 'HTTP proxy for Harvester to access external services.'
'harv-additional-ca': 'Custom CA root certificates for TLS validation.'
'harv-overcommit-config': 'Resource overcommit configuration.'
- 'harv-support-bundle-timeout': 'Support Bundle timeout config in minutes, use 0 to disable the timeout.'
- 'harv-support-bundle-expiration': 'Support Bundle expiration config in minutes.'
- 'harv-support-bundle-node-collection-timeout': 'Support Bundle node collection timeout config in minutes.'
- 'harv-vm-force-reset-policy': Config the force-reset action when a VM is stuck on a node that is down.
+ 'harv-support-bundle-timeout': 'Support bundle timeout configuration in minutes, use 0 to disable the timeout.'
+ 'harv-support-bundle-expiration': 'Support bundle expiration configuration in minutes.'
+ 'harv-support-bundle-node-collection-timeout': 'Support bundle node collection timeout configuration in minutes.'
+ 'harv-vm-force-reset-policy': Configuration for the force-reset action when a virtual machine is stuck on a node that is down.
'harv-ssl-parameters': Custom SSL Parameters for TLS validation.
'harv-storage-network': 'Longhorn storage-network setting.'
'harv-support-bundle-namespaces': Specify resources in other namespaces to be collected by the support package.
- 'harv-auto-disk-provision-paths': Specify the disks(using glob pattern) that Harvester will automatically add as VM storage.
+    'harv-auto-disk-provision-paths': Specify the disks (using glob pattern) that Harvester will automatically add as virtual machine storage.
'harv-support-bundle-image': Support bundle image configuration. Find different versions in rancher/support-bundle-kit.
'harv-release-download-url': This setting allows you to configure the upgrade release download
URL address. Harvester will get the ISO URL and checksum value from the ($URL
/$VERSION
/version.yaml) file hosted by the configured URL.
- 'harv-default-vm-termination-grace-period-seconds': Config the VM termination grace period for VM stop.
+ 'harv-default-vm-termination-grace-period-seconds': Configure the virtual machine termination grace period for virtual machine stop.
'harv-ntp-servers': Configure NTP server. You can configure multiple IPv4 addresses or host addresses.
'harv-auto-rotate-rke2-certs': The certificate rotation mechanism relies on Rancher. Harvester will automatically update certificates generation to trigger rotation.
- 'harv-kubeconfig-default-token-ttl-minutes': 'TTL (in minutes) applied on Harvester admin kubeconfig files. Default is 0, which means to never expire.'
+ 'harv-kubeconfig-default-token-ttl-minutes': 'TTL (in minutes) applied on Harvester administration kubeconfig files. Default is 0, which means to never expire.'
'harv-longhorn-v2-data-engine-enabled': 'Enable the Longhorn V2 data engine. Default is false. {username}
; all the global permissions, project, and cluster role bindings of this {vendor} user will also apply to the {provider} user.'
github:
clientId:
@@ -384,7 +384,7 @@ authConfig:
3: cattle-resource-system
namespace that has an encryption-provider-config.yaml
key. {namespace}
, already exists and cannot be added to a different project."
- project: Install into Project
+ project: Install Into Project
section:
chartOptions: Edit Options
valuesYaml: Edit YAML
@@ -931,8 +931,8 @@ catalog:
} the {existing, select,
true { app}
false { chart}
- }. Start by setting some basic information used by {vendor} to manage the App.
- nsCreationDescription: "To install the app into a new namespace enter it's name in the Namespace field and select it."
+ }. Start by setting some basic information used by {vendor} to manage the application.
+ nsCreationDescription: "To install the application into a new namespace, enter the name in the Namespace field and select it."
createNamespace: "Namespace {namespace}
will be created."
clusterTplVersion:
label: Version
@@ -940,19 +940,19 @@ catalog:
description: Select a version of the Cluster Template
clusterTplValues:
label: Values
- subtext: Change how the Cluster is defined
- description: Configure Values used by Helm that help define the Cluster.
+ subtext: Change how the cluster is defined
+ description: Configure Values used by Helm that help define the cluster.
helmValues:
label: Values
- subtext: Change how the App works
- description: Configure Values used by Helm that help define the App.
+ subtext: Change how the application works
+ description: Configure values used by Helm that help define the application.
chartInfo:
- button: View Chart Info
- label: Chart Info
+ button: View Chart Information
+ label: Chart Information
helmCli:
- checkbox: Customize Helm options before install
+ checkbox: Customize Helm options before installation
label: Helm Options
- subtext: Change how the app is deployed
+ subtext: Change how the application is deployed
description: Supply additional deployment options
version: Version
versions:
@@ -971,7 +971,7 @@ catalog:
gitBranch:
label: Git Branch
placeholder: e.g. master
- defaultMessage: 'Will default to "master" if left blank'
+ defaultMessage: 'The branch will default to "master" if left blank'
gitRepo:
label: Git Repo URL
placeholder: 'e.g. https://github.com/your-company/charts.git'
@@ -1103,7 +1103,7 @@ cluster:
rke2-multus: Multus Configuration
agentEnvVars:
label: Agent Environment
- detail: Add additional environment variables to the agent container. This is most commonly useful for configuring a HTTP proxy.
+      detail: Add additional environment variables to the agent container. This is most commonly useful for configuring an HTTP proxy.
keyLabel: Variable Name
cloudProvider:
aws:
@@ -1116,7 +1116,7 @@ cluster:
label: Google
rancher-vsphere:
label: vSphere
- note: 'Important: Configure the vSphere Cloud Provider and Storage Provider options in the Add-On Config tab.'
+ note: 'Important: Configure the vSphere Cloud Provider and Storage Provider options in the Add-On Configuration tab.'
harvester:
label: Harvester
copyConfig: Copy KubeConfig to Clipboard
@@ -1124,10 +1124,10 @@ cluster:
custom:
nodeRole:
label: Node Role
- detail: Choose what roles the node will have in the cluster. The cluster needs to have at least one node with each role.
+ detail: Choose what roles the node will have in the cluster. The cluster needs to have at least one node with each role.
advanced:
label: Advanced
- detail: Additional control over how the node will be registered. These values will often need to be different for each node registered.
+ detail: Additional control over how the node will be registered. These values will often need to be different for each node registered.
nodeName: Node Name
publicIp: Node Public IP
privateIp: Node Private IP
@@ -1140,14 +1140,14 @@ cluster:
windowsDetail: Run this command in PowerShell on each of the existing Windows machines you want to register. Windows nodes can only be workers.
windowsNotReady: The cluster must be up and running with Linux etcd, control plane, and worker nodes before the registration command for adding Windows workers will display.
windowsWarning: Workload pods, including some deployed by Rancher charts, will be scheduled on both Linux and Windows nodes by default. Edit NodeSelector in the chart to direct them to be placed onto a compatible node.
- windowsDeprecatedForRKE1: Windows support is being deprecated for RKE1. We suggest migrating to RKE2.
+ windowsDeprecatedForRKE1: Windows support is being deprecated for RKE1, and RKE1 will soon be deprecated. Please migrate to RKE2.
insecure: "Insecure: Select this to skip TLS verification if your server has a self-signed certificate."
credential:
banner:
createCredential: |-
{length, plural,
- =0 {First you'll need to create a credential to talk to the cloud provider}
- other {Ok, Let's create a new credential}
+ =0 {First, you will need to create a credential to talk to the cloud provider}
+ other {Ok, start creating a new credential}
}
selectExisting:
label: Select Existing
@@ -1160,7 +1160,7 @@ cluster:
label: Access Key
placeholder: Your AWS Access Key
defaultRegion:
- help: The default region to use when creating clusters. Also contacted to verify that this credential works.
+ help: The default region to use when creating clusters. Also contacted to verify that this credential works.
label: Default Region
secretKey:
label: Secret Key
@@ -1265,7 +1265,7 @@ cluster:
volume: Volume
imageVolume: Image Volume
addVolume: Add Volume
- addVMImage: Add VM Image
+ addVMImage: Add Virtual Machine Image
storageClass: Storage Class
sshUser: SSH User
userData:
@@ -1281,9 +1281,9 @@ cluster:
installGuestAgent: Install guest agent
description:
label: Cluster Description
- placeholder: Any text you want that better describes this cluster
+ placeholder: Any text to describe this cluster
harvester:
- importNotice: Import Harvester Clusters via
+ importNotice: Import Harvester Clusters Via
warning:
label: This is a Harvester Cluster - enable the Harvester feature flag to manage it
state: Warning
@@ -1316,11 +1316,11 @@ cluster:
sshUser:
placeholder: e.g. ubuntu
toolTip: SSH user to login with the selected OS image.
- haveOneOwner: There must be at least one member with the Owner role.
+ haveOneOwner: There must be at least one member with the owner role.
import:
warningBanner: 'You should not import a cluster which has already been connected to another instance of Rancher as it will lead to data corruption.'
commandInstructions: 'Run the kubectl
command below on an existing Kubernetes cluster running a supported Kubernetes version to import it into {vendor}:'
- commandInstructionsInsecure: 'If you get a "certificate signed by unknown authority" error, your {vendor} installation has a self-signed or untrusted SSL certificate. Run the command below instead to bypass the certificate verification:'
+ commandInstructionsInsecure: 'If you get a "certificate signed by unknown authority" error, your {vendor} installation has a self-signed or untrusted SSL certificate. Run the command below instead to bypass the certificate verification:'
clusterRoleBindingInstructions: 'If you get permission errors creating some of the resources, your user may not have the cluster-admin
role. Use this command to apply it:'
clusterRoleBindingCommand: 'kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user /etc/alertmanager/secrets/label: Additional Secrets - existing: Choose an existing config secret + existing: Choose an existing configuration secret info: | - Create default config: A Secret containing your Alertmanager Config will be created in the
cattle-monitoring-systemnamespace on deploying this chart under the name
alertmanager-rancher-monitoring-alertmanager. By default, this Secret will never be modified on an uninstall or upgrade of this chart.
cattle-monitoring-systemnamespace. If the namespace does not exist, you will not be able to select an existing secret. + Create default configuration: A Secret containing your Alertmanager configuration will be created in the
cattle-monitoring-systemnamespace on deploying this chart under the name
alertmanager-rancher-monitoring-alertmanager. By default, this secret will never be modified on an uninstall or upgrade of this chart.
cattle-monitoring-systemnamespace. If the namespace does not exist, you will not be able to select an existing secret. label: Alertmanager Secret - new: Create default config + new: Create default configuration radio: - label: Config Secret + label: Configuration Secret validation: duplicatedReceiverName: A receiver with the name {name} already exists. templates: @@ -3169,7 +3169,7 @@ monitoring: adminApi: Admin API evaluation: Evaluation Interval ignoreNamespaceSelectors: - help: 'Ignoring Namespace Selectors allows Cluster Admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of Apps that rely on setting up Monitors that scrape targets across multiple namespaces, such as Istio.' + help: 'Ignoring Namespace Selectors allows cluster admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of applications that rely on setting up monitors that scrape targets across multiple namespaces, such as Istio.' label: Namespace Selectors radio: enforced: 'Use: Monitors can access resources based on namespaces that match the namespace selector field' @@ -3189,13 +3189,13 @@ monitoring: label: Persistent Storage for Prometheus mode: Access Mode selector: Selector - selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no Selectors should be specified since a PVC with a non-empty selector can''t have a PV dynamically provisioned for it.' + selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no selectors should be specified since a PVC with a non-empty selector cannot have a PV dynamically provisioned for it.' size: Size volumeName: Volume Name title: Configure Prometheus warningInstalled: | Warning: Prometheus Operators are currently deployed. Deploying multiple Prometheus Operators onto one cluster is not currently supported. 
Please remove all other Prometheus Operator deployments from this cluster before trying to install this chart. - If you are migrating from an older version of {vendor} with Monitoring enabled, please disable Monitoring on this cluster completely before attempting to install this chart. + If you are migrating from an older version of {vendor} with monitoring enabled, please disable monitoring on this cluster completely before attempting to install this chart. receiver: addReceiver: Add Receiver fields: @@ -3211,16 +3211,16 @@ monitoring: keyFilePath: label: Key File Path placeholder: e.g. ./key-file.pfx - secretsBanner: The file paths below must be referenced in
alertmanager.alertmanagerSpec.secretswhen deploying the Monitoring chart. For more information see our documentation. + secretsBanner: The file paths below must be referenced in
alertmanager.alertmanagerSpec.secretswhen deploying the monitoring chart. For more information see our documentation. projectMonitoring: detail: - error: "Unable to fetch Dashboard values with status: " + error: "Unable to fetch dashboard values with status: " list: - banner: Project Monitoring Configuration is stored in ProjectHelmChart resources + banner: Project monitoring configuration is stored in ProjectHelmChart resources empty: - message: Project Monitoring has not been configured for any projects - canCreate: Get started by clicking Create to add monitoring to a project - cannotCreate: Contact the admin to add project monitoring + message: Project monitoring has not been configured for any projects + canCreate: Get started by clicking create to add monitoring to a project + cannotCreate: Contact the administrator to add project monitoring route: label: Route fields: @@ -3235,9 +3235,9 @@ monitoring: alertmanagerConfig: description: Routes and receivers for project alerting and cluster alerting are configured within AlertmanagerConfig resources. empty: Alerts have not been configured for any accessible namespaces. - getStarted: Get started by clicking Create to configure an alert. + getStarted: Get started by clicking create to configure an alert. receiverTooltip: This route will direct alerts to the selected receiver, which must be defined in the same AlertmanagerConfig. - deprecationWarning: The Route and Receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. + deprecationWarning: The route and receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. routeInfo: This form supports configuring one route that directs traffic to a receiver. 
Alerts can be directed to more receiver(s) by configuring child routes in YAML. receiverFormNames: create: Create Receiver in AlertmanagerConfig @@ -3272,41 +3272,41 @@ monitoring: warning2: Learn more about the migration steps to V2 Monitoring. promptDescription:
rancher-alerting-driversdefault values were changed, please update the url below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> - banner: To use MS Teams or SMS you will need to have at least one instance of
rancher-alerting-driversinstalled first. + title: Webhook Configuration + urlTooltip: For some webhooks, this is a URL that points to the service DNS + modifyNamespace: If
rancher-alerting-driversdefault values were changed, please update the URL below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> + banner: To use MS Teams or SMS, you will need to have at least one instance of
rancher-alerting-driversinstalled first. add: selectWebhookType: Select Webhook Type generic: Generic @@ -3341,7 +3341,7 @@ monitoringReceiver: label: Enable send resolved alerts alertmanagerConfigReceiver: - secretKeyId: Key Id from Secret + secretKeyId: Key ID from Secret name: Receiver Name addButton: Add Receiver receivers: Receivers @@ -3355,7 +3355,7 @@ monitoringRoute: label: Group By addGroupByLabel: Labels to Group Alerts By groupByTooltip: Add each label as a string in the format key:value. The special label ... will aggregate by all possible labels. If provided, the ... must be the only element in the list. - info: This is the top-level Route used by Alertmanager as the default destination for any Alerts that do not match any other Routes. This Route must exist and cannot be deleted. + info: This is the top-level route used by Alertmanager as the default destination for any alerts that do not match any other routes. This route must exist and cannot be deleted. interval: label: Group Interval matching: @@ -3504,7 +3504,7 @@ networkpolicy: ruleHint: Incoming traffic is only allowed from the configured sources portHint: Incoming traffic is only allowed to connect to the configured ports labelsAnnotations: - label: Labels & Annotations + label: Labels and Annotations rules: pod: Pod namespace: Namespace @@ -3535,12 +3535,12 @@ networkpolicy: namespaceSelector: label: Namespace Selector namespaceAndPodSelector: - label: Namespace/Pod Selector + label: Namespace and Pod Selector config: label: Configuration selectors: label: Selectors - hint: The NetworkPolicy is applied to the selected Pods + hint: The NetworkPolicy is applied to the selected pods matchingPods: matchesSome: |- {matched, plural, @@ -3592,8 +3592,8 @@ node: used: Used amount: "{used} of {total} {unit}" cpu: CPU - memory: MEMORY - pods: PODS + memory: Memory + pods: Pods diskPressure: Disk Pressure kubelet: kubelet memoryPressure: Memory Pressure @@ -3806,7 +3806,7 @@ persistentVolume: portals: 
add: Add Portal cinder: - label: Openstack Cinder Volume (Unsupported) + label: OpenStack Cinder Volume (Unsupported) volumeId: label: Volume ID placeholder: e.g. vol @@ -3891,7 +3891,7 @@ persistentVolume: label: Path on the Node placeholder: /mnt/disks/ssd1 mustBe: - label: The Path on the Node must be + label: The path on the node must be anything: 'Anything: do not check the target path' directory: A directory, or create if it does not exist file: A file, or create if it does not exist @@ -3954,8 +3954,8 @@ persistentVolumeClaim: source: label: Source options: - new: Use a Storage Class to provision a new Persistent Volume - existing: Use an existing Persistent Volume + new: Use a storage class to provision a new persistent volume + existing: Use an existing persistent volume expand: label: Expand notSupported: Storage class does not support volume expansion @@ -3966,8 +3966,8 @@ persistentVolumeClaim: requestStorage: Request Storage persistentVolume: Persistent Volume tooltips: - noStorageClass: You don't have permission to list Storage Classes, enter a name manually - noPersistentVolume: You don't have permission to list Persistent Volumes, enter a name manually + noStorageClass: You do not have permission to list storage classes, enter a name manually + noPersistentVolume: You do not have permission to list persistent volumes, enter a name manually customize: label: Customize accessModes: @@ -4006,18 +4006,18 @@ plugins: installing: Installing ... uninstalling: Uninstalling ... 
descriptions: - experimental: This Extension is marked as experimental - third-party: This Extension is provided by a Third-Party - built-in: This Extension is built-in - image: This Extension Image has been loaded manually + experimental: This extension is marked as experimental + third-party: This extension is provided by a third-party + built-in: This extension is built-in + image: This extension image has been loaded manually error: title: Error loading extension message: Could not load extension code generic: Extension error - api: This Extension is not compatible with the Extensions API - host: This Extension is not compatible with this application - version: This Extension is not compatible with this version of Rancher - load: An error occurred loading the code for this Extension + api: This extension is not compatible with the extension API + host: This extension is not compatible with this application + version: This extension is not compatible with this version of Rancher + load: An error occurred loading the code for this extension success: title: Loaded extension {name} message: Extension was loaded successfully @@ -4036,10 +4036,10 @@ plugins: requiresVersion: "Requires Rancher {version}" empty: all: Extensions are neither installed nor available - available: No Extensions available - installed: No Extensions installed - updates: No updates available for installed Extensions - images: No Extension Images installed + available: No extension available + installed: No extension installed + updates: No updates available for installed extension + images: No extension images installed loadError: An error occurred loading the code for this extension helmError: "An error occurred installing the extension via Helm" manageRepos: Manage Repositories @@ -4050,14 +4050,14 @@ plugins: subtitle: Catalogs imageLoad: load: Import Extension Catalog - prompt: An Extension Catalog contains extension assets bundled into an image, importing will take the image and host a 
Helm repository to act as a catalog for custom built Extensions. + prompt: An extension catalog contains extension assets bundled into an image, importing will take the image and host a Helm repository to act as a catalog for custom built extensions. fields: image: label: Catalog Image Reference placeholder: "e.g. hub.docker.io/example-org/my-image:latest" secrets: - banner: "If the registry that hosts the Catalog Image requires Pull Secrets, they must be created in the following namespace:
cattle-ui-plugin-system" - banner: This will create an Deployment, Service, and Helm repository to serve the extension charts. + banner: "If the registry that hosts the catalog image requires pull secrets, they must be created in the following namespace:
cattle-ui-plugin-system" + banner: This will create a deployment, service, and Helm repository to serve the extension charts. imageVersion: title: Image Version Not Found message: Unable to determine image version from {image}, defaulting to latest @@ -4074,7 +4074,7 @@ plugins: message: A repository with the name {repo} already exists success: title: "Imported Extension Catalog from: {name}" - message: Extension Catalog image was imported successfully + message: Extension catalog image was imported successfully headers: image: name: images @@ -4093,23 +4093,23 @@ plugins: install: label: Install title: Install Extension {name} - prompt: "Are you sure that you want to install this Extension?" + prompt: "Are you sure that you want to install this extension?" version: Version - warnNotCertified: Please ensure that you are aware of the risks of installing Extensions from untrusted authors + warnNotCertified: Please ensure that you are aware of the risks of installing extensions from untrusted authors update: label: Update title: Update Extension {name} - prompt: "Are you sure that you want to update this Extension?" + prompt: "Are you sure that you want to update this extension?" rollback: label: Rollback title: Rollback Extension {name} - prompt: "Are you sure that you want to rollback this Extension?" + prompt: "Are you sure that you want to rollback this extension?" uninstall: label: Uninstall title: "Uninstall Extension: {name}" - prompt: "Are you sure that you want to uninstall this Extension?" - custom: "Are you sure that you want to uninstall this Extension Image? This will also remove any Extensions provided by this image." - upgradeAvailable: A newer version of this Extension is available + prompt: "Are you sure that you want to uninstall this extension?" + custom: "Are you sure that you want to uninstall this extension image? This will also remove any extensions provided by this image." 
+ upgradeAvailable: A newer version of this extension is available reload: Extensions changed - reload required safeMode: title: Extensions Safe Mode @@ -4118,19 +4118,19 @@ plugins: title: Extension support is not enabled prompt: cant: Automatic installation is not available - required Helm Charts could not be found - can: You need to install the Extension Operator + can: You need to install the extension operator install: title: Enable Extension Support? - prompt: This will install the Helm charts to enable Extension support - airgap: The Rancher Extensions Repository provides extensions published by Rancher. Un-check if your Rancher installation is air-gapped + prompt: This will install the Helm charts to enable extension support + airgap: The Rancher extensions repository provides extensions published by Rancher. De-select if your Rancher installation is air-gapped addRancherRepo: Add the Rancher Extension Repository remove: label: Disable Extension Support title: Disable Extension Support? - prompt: This will un-install the Helm charts that enable Extension support + prompt: This will un-install the Helm charts that enable extension support registry: title: Remove the Rancher Extensions Repository - prompt: The Rancher Extensions Repository provides extensions published by Rancher + prompt: The Rancher extension repository provides extensions published by Rancher crd: title: Remove the Rancher Extensions Custom Resource Definition prompt: There are one or more extensions installed - removing the CRD will require you to manually reinstall these extensions if you subsequently re-enable extensions support. @@ -4153,7 +4153,7 @@ podSecurityAdmission: placeholder: 'Version (default: latest)' exemptions: title: Exemptions - description: Allow the creation of pods for specific Usernames, RuntimeClassNames, and Namespaces that would otherwise be prohibited due to the policies set above. 
+ description: Allow the creation of pods for specific usernames, RuntimeClassNames, and namespaces that would otherwise be prohibited due to the policies set above. placeholder: Enter a comma separated list of {psaExemptionsControl} prefs: title: Preferences @@ -4191,9 +4191,9 @@ prefs: advFeatures: title: Advanced Features viewInApi: Enable "View in API" - allNamespaces: Show system Namespaces managed by Rancher (not intended for editing or deletion) - themeShortcut: Enable Dark/Light Theme keyboard shortcut toggle (shift+T) - pluginDeveloper: Enable Extension developer features + allNamespaces: Show system namespaces managed by Rancher (not intended for editing or deletion) + themeShortcut: Enable Dark/Light theme keyboard shortcut toggle (shift+T) + pluginDeveloper: Enable extension developer features hideDesc: label: Hide All Type Descriptions helm: @@ -4249,9 +4249,9 @@ project: members: label: Members containerDefaultResourceLimit: Container Default Resource Limit - vmDefaultResourceLimit: VM Default Resource Limit + vmDefaultResourceLimit: Virtual Machine Default Resource Limit resourceQuotas: Resource Quotas - haveOneOwner: There must be at least one member with the Owner role. + haveOneOwner: There must be at least one member with the owner role. psp: default: Cluster Default label: Pod Security Policy @@ -4263,23 +4263,23 @@ projectMembers: label: Project projectPermissions: label: Project Permissions - description: Controls what access users have to the Project + description: Controls what access users have to the project noDescription: User created - no description searchForMember: Search for a member to provide project access owner: label: Owner - description: Owners have full control over the Project and all resources inside it. + description: Owners have full control over the project and all resources inside it. member: label: Member - description: Members can manage the resources inside the Project but not change the Project itself. 
+ description: Members can manage the resources inside the project but not change the project itself. readOnly: label: Read Only - description: Members can only view the resources inside the Project but not change the resources. + description: Members can only view the resources inside the project but not change the resources. custom: label: Custom description: Choose individual roles for this user. createNs: Create Namespaces - configmapsManage: Manage Config Maps + configmapsManage: Manage Configuration Maps ingressManage: Manage Ingress projectcatalogsManage: Manage Project Catalogs projectroletemplatebindingsManage: Manage Project Members @@ -4324,7 +4324,7 @@ prometheusRule: summary: input: Summary Annotation Value label: Summary - bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured Receivers.' + bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured receivers.' for: label: Wait to fire for placeholder: '60' @@ -4363,14 +4363,14 @@ prometheusRule: promptForceRemove: modalTitle: Are you sure? - removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. It's highly recommended to manually delete any referenced infrastructure." + removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. We recommend to manually delete any referenced infrastructure." forceDelete: Force Delete confirmName: "Enter in the pool name below to confirm:" podRemoveWarning: "Force deleting pods does not wait for confirmation that the pod's processes have been terminated. 
This may result in data corruption or inconsistencies" promptScaleMachineDown: attemptingToRemove: "You are attempting to delete {count} {type}" - retainedMachine1: At least one Machine must exist for roles Control Plane and Etcd. + retainedMachine1: At least one machine must exist for roles control plane and Etcd. retainedMachine2: { name } will remain promptRemove: @@ -4382,7 +4382,7 @@ promptRemove: other { and {count} others.} } attemptingToRemove: "You are attempting to delete the {type}" - attemptingToRemoveAuthConfig: "You are attempting to disable this Auth Provider.
docker ps
, then run:'
dockerSuffix: ""
@@ -5277,7 +5277,7 @@ tableHeaders:
apiGroup: API Groups
apikey: API Key
available: Available
- attachedVM: Attached VM
+ attachedVM: Attached Virtual Machine
authRoles:
globalDefault: New User Default
@@ -5380,7 +5380,7 @@ tableHeaders:
namespaceName: Name
namespaceNameUnlinked: Name
networkType: Type
- networkVlan: Vlan ID
+ networkVlan: VLAN ID
node: Node
nodeName: Node Name
nodesReady: Nodes Ready
@@ -5548,7 +5548,7 @@ validation:
name: Cluster name cannot be 'local' or take the form 'c-xxxxx'
conflict: |-
This resource has been modified since you started editing it, and some of those modifications conflict with your changes.
- This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then Save again.
+ This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then save again.
Conflicting {fieldCount, plural, =1 {field} other {fields}}: {fields}
custom:
missing: 'No validator exists for { validatorName }! Does the validator exist in custom-validators? Is the name spelled correctly?'
@@ -5577,7 +5577,7 @@ validation:
global: Requires "Cluster Output" to be selected.
output:
logdna:
- apiKey: Required an "Api Key" to be set.
+ apiKey: Requires an "API Key" to be set.
invalidCron: Invalid cron schedule
k8s:
name: Must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc').
@@ -5617,17 +5617,17 @@ validation:
port: A port must be a number between 1 and 65535.
path: '"{key}" must be an absolute path'
prometheusRule:
- noEdit: This Prometheus Rule may not be edited due to invalid characters in name.
+ noEdit: This Prometheus rule may not be edited due to invalid characters in name.
groups:
required: At least one rule group is required.
singleAlert: A rule may contain alert rules or recording rules but not both.
valid:
name: 'Name is required for rule group {index}.'
rule:
- alertName: 'Rule group {groupIndex} rule {ruleIndex} requires a Alert Name.'
- expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL Expression.'
- alertName: 'Rule group {groupIndex} rule {ruleIndex} requires an alert name.'
+ expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL expression.'
labels: 'Rule group {groupIndex} rule {ruleIndex} requires at least one label. Severity is recommended.'
- recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a Time Series Name.'
+ recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a time series name.'
singleEntry: 'At least one alert rule or one recording rule is required in rule group {index}.'
required: '"{key}" is required'
invalid: '"{key}" is invalid'
@@ -5636,12 +5636,12 @@ validation:
roleTemplate:
roleTemplateRules:
missingVerb: You must specify at least one verb for each resource grant
- missingResource: You must specify a Resource for each resource grant
- missingApiGroup: You must specify an API Group for each resource grant
- missingOneResource: You must specify at least one Resource, Non-Resource URL or API Group for each resource grant
+ missingResource: You must specify a resource for each resource grant
+ missingApiGroup: You must specify an API group for each resource grant
+ missingOneResource: You must specify at least one resource, non-resource URL or API group for each resource grant
service:
externalName:
- none: External Name is required on an ExternalName Service.
+ none: External name is required on an ExternalName service.
ports:
name:
required: 'Port Rule [{position}] - Name is required.'
@@ -5669,7 +5669,7 @@ validation:
missingProjectId: A target must have a project selected.
monitoring:
route:
- match: At least one Match or Match Regex must be selected
+ match: At least one match or match regex must be selected
interval: '"{key}" must be of a format with digits followed by a unit i.e. 1h, 2m, 30s'
tab: "One or more fields in this tab contain a form validation error"
@@ -5761,9 +5761,9 @@ workload:
initialDelay: Initial Delay
livenessProbe: Liveness Check
livenessTip: Containers will be restarted when this check is failing. Not recommended for most uses.
- noHealthCheck: "There is not a Readiness Check, Liveness Check or Startup Check configured."
+ noHealthCheck: "There is no readiness check, liveness check, or startup check configured."
readinessProbe: Readiness Checks
- readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
+ readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
startupProbe: Startup Check
startupTip: Containers will wait until this check succeeds before attempting other health checks.
successThreshold: Success Threshold
@@ -5829,9 +5829,9 @@ workload:
noServiceAccess: You do not have permission to create or manage services
ports:
expose: Networking
- description: 'Define a Service to expose the container, or define a non-functional, named port so that humans will know where the app within the container is expected to run.'
- detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a Service is automatically created that will select the Pods in this workload using labels.
- toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on Services. You can also manually create a Service to expose Pods by selecting their labels, and you can use an Ingress to map HTTP routes to Services.'
+ description: 'Define a service to expose the container, or define a non-functional, named port so that other users will know where the application within the container is expected to run.'
+ detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a service is automatically created that will select the pods in this workload using labels.
+ toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on services. You can also manually create a service to expose pods by selecting their labels, and you can use an ingress to map HTTP routes to services.'
createService: Service Type
noCreateService: Do not create a service
containerPort: Private Container Port
@@ -5905,13 +5905,13 @@ workload:
detail:
services: Services
ingresses: Ingresses
- cannotViewServices: Could not list Services due to lack of permission.
- cannotFindServices: Could not find any Services that select Pods from this workload.
- serviceListCaption: "The following Services select Pods from this workload:"
- cannotViewIngresses: Could not list Ingresses due to lack of permission.
- cannotFindIngresses: Could not find any Ingresses that forward traffic to Services that select Pods in this workload.
- ingressListCaption: "The following Ingresses forward traffic to Services that select Pods from this workload:"
- cannotViewIngressesBecauseCannotViewServices: Could not find relevant relevant Ingresses due to lack of permission to view Services.
+ cannotViewServices: Could not list services due to lack of permission.
+ cannotFindServices: Could not find any services that select pods from this workload.
+ serviceListCaption: "The following services select pods from this workload:"
+ cannotViewIngresses: Could not list ingresses due to lack of permission.
+ cannotFindIngresses: Could not find any ingresses that forward traffic to services that select pods in this workload.
+ ingressListCaption: "The following ingresses forward traffic to services that select pods from this workload:"
+ cannotViewIngressesBecauseCannotViewServices: Could not find relevant ingresses due to lack of permission to view services.
pods:
title: Pods
detailTop:
@@ -6100,7 +6100,7 @@ workload:
addMount: Add Mount
addVolume: Add Volume
selectVolume: Select Volume
- noVolumes: Volumes will appear here after they are added in the Pod tab
+ noVolumes: Volumes will appear here after they are added in the pod tab
certificate: Certificate
csi:
diskName: Disk Name
@@ -6131,12 +6131,12 @@ workload:
defaultMode: Default Mode
driver: driver
hostPath:
- label: The Path on the Node must be
+ label: The Path on the node must be
options:
default: 'Anything: do not check the target path'
- directoryOrCreate: A directory, or create if it doesn't exist
+ directoryOrCreate: A directory, or create if it does not exist
directory: An existing directory
- fileOrCreate: A file, or create if it doesn't exist
+ fileOrCreate: A file, or create if it does not exist
file: An existing file
socket: An existing socket
charDevice: An existing character device
@@ -6165,11 +6165,11 @@ workload:
placeholder: "e.g. 300"
typeDescriptions:
apps.daemonset: DaemonSets run exactly one pod on every eligible node. When new nodes are added to the cluster, DaemonSets automatically deploy to them. Recommended for system-wide or vertically-scalable workloads that never need more than one pod per node.
- apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless & horizontally-scalable workloads.
+ apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless and horizontally-scalable workloads.
apps.statefulset: StatefulSets manage stateful applications and provide guarantees about the ordering and uniqueness of the pods created. Recommended for workloads with persistent storage or strict identity, quorum, or upgrade order requirements.
- batch.cronjob: CronJobs create Jobs, which then run Pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
+ batch.cronjob: CronJobs create jobs, which then run pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
batch.job: Jobs create one or more pods to reliably perform a one-time task by running a pod until it exits successfully. Failed pods are automatically replaced until the specified number of completed runs has been reached. Jobs can also run multiple pods in parallel or function as a batch work queue.
- pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A Pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
+ pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
upgrading:
activeDeadlineSeconds:
label: Pod Active Deadline
@@ -6178,8 +6178,8 @@ workload:
label: Concurrency
options:
allow: Allow CronJobs to run concurrently
- forbid: Skip next run if current run hasn't finished
- replace: Replace run if current run hasn't finished
+ forbid: Skip next run if current run has not finished
+ replace: Replace run if current run has not finished
maxSurge:
label: Max Surge
tip: The maximum number of pods allowed beyond the desired scale at any given time.
@@ -6201,7 +6201,7 @@ workload:
labels:
delete: "On Delete: New pods are only created when old pods are manually deleted."
recreate: "Recreate: Kill ALL pods, then start new pods."
- rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Don't stop more pods than max unavailable."
+ rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Do not stop more pods than max unavailable."
terminationGracePeriodSeconds:
label: Termination Grace Period
tip: The duration the pod needs to terminate successfully.
@@ -6298,24 +6298,24 @@ typeDescription:
cis.cattle.io.clusterscanprofile: A profile is the configuration for the CIS scan, which is the benchmark versions to use and any specific tests to skip in that benchmark.
cis.cattle.io.clusterscan: A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed.
cis.cattle.io.clusterscanreport: A report is the result of a CIS scan of the cluster.
- management.cattle.io.feature: Feature Flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
- cluster.x-k8s.io.machine: A Machine encapsulates the configuration of a Kubernetes Node. Use this view to see what happens after updating a cluster.
- cluster.x-k8s.io.machinedeployment: A Machine Deployment orchestrates deployments via templates over a collection of Machine Sets (similar to a Deployment). Use this view to see what happens after updating a cluster.
- cluster.x-k8s.io.machineset: A Machine Set ensures the desired number of Machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
+ management.cattle.io.feature: Feature flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
+ cluster.x-k8s.io.machine: A machine encapsulates the configuration of a Kubernetes node. Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machinedeployment: A machine deployment orchestrates deployments via templates over a collection of machine sets (similar to a deployment). Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machineset: A machine set ensures the desired number of machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
resources.cattle.io.backup: A backup is created to perform one-time backups or schedule recurring backups based on a ResourceSet.
resources.cattle.io.restore: A restore is created to trigger a restore to the cluster based on a backup file.
resources.cattle.io.resourceset: A resource set defines which CRDs and resources to store in the backup.
monitoring.coreos.com.servicemonitor: A service monitor defines the group of services and the endpoints that Prometheus will scrape for metrics. This is the most common way to define metrics collection.
- monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor wouldn't work.
- monitoring.coreos.com.prometheusrule: A Prometheus Rule resource defines both recording and/or alert rules. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
+ monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor would not work.
+ monitoring.coreos.com.prometheusrule: A Prometheus rule resource defines recording rules, alert rules, or both. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration.
monitoring.coreos.com.alertmanager: An alert manager is deployment whose configuration will be specified by a secret in the same namespace, which determines which alerts should go to which receiver.
- node: The base Kubernetes Node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
+ node: The base Kubernetes node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
catalog.cattle.io.clusterrepo: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster.'
- catalog.cattle.io.clusterrepo.local: ' A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.'
+ catalog.cattle.io.clusterrepo.local: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.'
catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster.
catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI.
- logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected Cluster Output.
+ logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected cluster output.
logging.banzaicloud.io.clusteroutput: A cluster output defines which logging providers that logs can be sent to and is only effective when deployed in the namespace that the logging operator is in.
logging.banzaicloud.io.flow: A flow defines which logs to collect and filter as well as which output to send the logs. The flow is a namespaced resource, which means logs will only be collected from the namespace that the flow is deployed in.
logging.banzaicloud.io.output: An output defines which logging providers that logs can be sent to. The output needs to be in the same namespace as the flow that is using it.
@@ -6349,8 +6349,8 @@ typeLabel:
}
catalog.cattle.io.app: |-
{count, plural,
- one { Installed App }
- other { Installed Apps }
+ one { Installed Application }
+ other { Installed Applications }
}
catalog.cattle.io.clusterrepo: |-
{count, plural,
@@ -6359,18 +6359,18 @@ typeLabel:
}
catalog.cattle.io.repo: |-
{count, plural,
- one { Namespaced Repo }
- other { Namespaced Repos }
+ one { Namespaced Repository }
+ other { Namespaced Repositories }
}
chartinstallaction: |-
{count, plural,
- one { App }
- other { Apps }
+ one { Application }
+ other { Applications }
}
chartupgradeaction: |-
{count, plural,
- one { App }
- other { Apps }
+ one { Application }
+ other { Applications }
}
cloudcredential: |-
{count, plural,
@@ -6394,8 +6394,8 @@ typeLabel:
}
fleet.cattle.io.gitrepo: |-
{count, plural,
- one { Git Repo }
- other {Git Repos }
+ one { Git Repository }
+ other { Git Repositories }
}
management.cattle.io.authconfig: |-
{count, plural,
@@ -6500,8 +6500,8 @@ typeLabel:
}
'management.cattle.io.cluster': |-
{count, plural,
- one { Mgmt Cluster }
- other { Mgmt Clusters }
+ one { Management Cluster }
+ other { Management Clusters }
}
'cluster.x-k8s.io.cluster': |-
{count, plural,
@@ -6680,8 +6680,8 @@ typeLabel:
}
harvesterhci.io.cloudtemplate: |-
{count, plural,
- one { Cloud Config Template }
- other { Cloud Config Templates }
+ one { Cloud Configuration Template }
+ other { Cloud Configuration Templates }
}
fleet.cattle.io.content: |-
{count, plural,
@@ -6700,8 +6700,8 @@ typeLabel:
}
k3s.cattle.io.addon: |-
{count, plural,
- one { Addon }
- other { Addons }
+ one { Add-on }
+ other { Add-ons }
}
management.cattle.io.apiservice: |-
{count, plural,
@@ -6920,7 +6920,7 @@ keyValue:
registryMirror:
header: Mirrors
- toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
+ toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
addLabel: Add Mirror
description: Mirrors define the names and endpoints for private registries. The endpoints are tried one by one, and the first working one is used.
hostnameLabel: Registry Hostname
@@ -6968,12 +6968,12 @@ advancedSettings:
'cluster-defaults': 'Override RKE Defaults when creating new clusters.'
'engine-install-url': 'Default Docker engine installation URL (for most node drivers).'
'engine-iso-url': 'Default OS installation URL (for vSphere driver).'
- 'engine-newest-version': 'The newest supported version of Docker at the time of this release. A Docker version that does not satisfy supported docker range but is newer than this will be marked as untested.'
- 'engine-supported-range': 'Semver range for supported Docker engine versions. Versions which do not satisfy this range will be marked unsupported in the UI.'
- 'ingress-ip-domain': 'Wildcard DNS domain to use for automatically generated Ingress hostnames. auth-user-session-ttl-minutes
and auth-token-max-ttl-minutes
) in the Settings page.
+ information: To change the automatic logout behavior, edit the authorization and session token timeout values (auth-user-session-ttl-minutes
and auth-token-max-ttl-minutes
) in the settings page.
description: When enabled and the user is inactive past the specified timeout, the UI will no longer fresh page content and the user must reload the page to continue.
authUserTTL: This timeout cannot be higher than the user session timeout auth-user-session-ttl-minutes, which is currently {current} minutes.
@@ -7261,8 +7261,8 @@ support:
text: Login to SUSE Customer Center to access support for your subscription
action: SUSE Customer Center
aws:
- generateConfig: Generate Support Config
- text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support config file below.'
+ generateConfig: Generate Support Configuration
+ text: 'Log in to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support configuration file below.'
promos:
one:
title: 24x7 Support
@@ -7307,7 +7307,7 @@ legacy:
project:
label: Project
- select: "Use the Project/Namespace filter at the top of the page to select a Project in order to see legacy Project features."
+ select: "Use the namespace or project filter at the top of the page to select a project in order to see legacy project features."
serverUpgrade:
title: "{vendor} Server Changed"