diff --git a/pkg/harvester/config/harvester-map.js b/pkg/harvester/config/harvester-map.js index bf49cf949a2..02cbfe25063 100644 --- a/pkg/harvester/config/harvester-map.js +++ b/pkg/harvester/config/harvester-map.js @@ -23,7 +23,7 @@ export const InterfaceOption = [{ export const SOURCE_TYPE = { NEW: 'New', - IMAGE: 'VM Image', + IMAGE: 'Virtual Machine Image', ATTACH_VOLUME: 'Existing Volume', CONTAINER: 'Container' }; diff --git a/pkg/harvester/config/harvester.js b/pkg/harvester/config/harvester.js index 4cb4ecee538..63774d06d82 100644 --- a/pkg/harvester/config/harvester.js +++ b/pkg/harvester/config/harvester.js @@ -37,8 +37,8 @@ import { import { IF_HAVE } from '@shell/store/type-map'; const TEMPLATE = HCI.VM_VERSION; -const MONITORING_GROUP = 'Monitoring & Logging::Monitoring'; -const LOGGING_GROUP = 'Monitoring & Logging::Logging'; +const MONITORING_GROUP = 'Monitoring and Logging::Monitoring'; +const LOGGING_GROUP = 'Monitoring and Logging::Logging'; export const PRODUCT_NAME = 'harvester'; diff --git a/pkg/harvester/l10n/en-us.yaml b/pkg/harvester/l10n/en-us.yaml index 8b35d8c22c4..f25ca710755 100644 --- a/pkg/harvester/l10n/en-us.yaml +++ b/pkg/harvester/l10n/en-us.yaml @@ -773,7 +773,7 @@ harvester: backupTargetTip: The endpoint used to access the backupstore. NFS and S3 are supported. message: noSetting: - prefix: You must configure the backup target in + prefix: You must configure the backup target middle: 'setting' suffix: before creating a new backup. errorTip: @@ -1270,7 +1270,7 @@ harvester: vGpuDevices: vGPU Devices showMore: Show More parentSriov: Filter By Parent SR-IOV GPU - noPermission: Please contac your system admiistrator to add Harvester add-ons first. + noPermission: Please contact your system administrator to add Harvester add-ons first. goSetting: prefix: The nvidia-driver-toolkit add-on is not enabled, click middle: here diff --git a/shell/assets/translations/en-us.yaml b/shell/assets/translations/en-us.yaml index 859848f61c5..002db502849 100644 --- a/shell/assets/translations/en-us.yaml +++ b/shell/assets/translations/en-us.yaml @@ -3592,8 +3592,8 @@ node: used: Used amount: "{used} of {total} {unit}" cpu: CPU - memory: MEMORY - pods: PODS + memory: Memory + pods: Pods diskPressure: Disk Pressure kubelet: kubelet memoryPressure: Memory Pressure @@ -3806,7 +3806,7 @@ persistentVolume: portals: add: Add Portal cinder: - label: Openstack Cinder Volume (Unsupported) + label: OpenStack Cinder Volume (Unsupported) volumeId: label: Volume ID placeholder: e.g. 
vol @@ -3891,7 +3891,7 @@ persistentVolume: label: Path on the Node placeholder: /mnt/disks/ssd1 mustBe: - label: The Path on the Node must be + label: The path on the node must be anything: 'Anything: do not check the target path' directory: A directory, or create if it does not exist file: A file, or create if it does not exist @@ -3954,8 +3954,8 @@ persistentVolumeClaim: source: label: Source options: - new: Use a Storage Class to provision a new Persistent Volume - existing: Use an existing Persistent Volume + new: Use a storage class to provision a new persistent volume + existing: Use an existing persistent volume expand: label: Expand notSupported: Storage class does not support volume expansion @@ -3966,8 +3966,8 @@ persistentVolumeClaim: requestStorage: Request Storage persistentVolume: Persistent Volume tooltips: - noStorageClass: You don't have permission to list Storage Classes, enter a name manually - noPersistentVolume: You don't have permission to list Persistent Volumes, enter a name manually + noStorageClass: You do not have permission to list storage classes, enter a name manually + noPersistentVolume: You do not have permission to list persistent volumes, enter a name manually customize: label: Customize accessModes: @@ -4006,18 +4006,18 @@ plugins: installing: Installing ... uninstalling: Uninstalling ... descriptions: - experimental: This Extension is marked as experimental - third-party: This Extension is provided by a Third-Party - built-in: This Extension is built-in - image: This Extension Image has been loaded manually + experimental: This extension is marked as experimental + third-party: This extension is provided by a third party + built-in: This extension is built-in + image: This extension image has been loaded manually error: title: Error loading extension message: Could not load extension code generic: Extension error - api: This Extension is not compatible with the Extensions API - host: This Extension is not compatible with this application - version: This Extension is not compatible with this version of Rancher - load: An error occurred loading the code for this Extension + api: This extension is not compatible with the extension API + host: This extension is not compatible with this application + version: This extension is not compatible with this version of Rancher + load: An error occurred loading the code for this extension success: title: Loaded extension {name} message: Extension was loaded successfully @@ -4036,10 +4036,10 @@ plugins: requiresVersion: "Requires Rancher {version}" empty: all: Extensions are neither installed nor available - available: No Extensions available - installed: No Extensions installed - updates: No updates available for installed Extensions - images: No Extension Images installed + available: No extensions available + installed: No extensions installed + updates: No updates available for installed extensions + images: No extension images installed loadError: An error occurred loading the code for this extension helmError: "An error occurred installing the extension via Helm" manageRepos: Manage Repositories @@ -4050,14 +4050,14 @@ plugins: subtitle: Catalogs imageLoad: load: Import Extension Catalog - prompt: An Extension Catalog contains extension assets bundled into an image, importing will take the image and host a Helm repository to act as a catalog for custom built Extensions. 
+ prompt: An extension catalog contains extension assets bundled into an image; importing will take the image and host a Helm repository to act as a catalog for custom-built extensions. fields: image: label: Catalog Image Reference placeholder: "e.g. hub.docker.io/example-org/my-image:latest" secrets: - banner: "If the registry that hosts the Catalog Image requires Pull Secrets, they must be created in the following namespace:
cattle-ui-plugin-system
" - banner: This will create an Deployment, Service, and Helm repository to serve the extension charts. + banner: "If the registry that hosts the catalog image requires pull secrets, they must be created in the following namespace:
cattle-ui-plugin-system
" + banner: This will create a deployment, service, and Helm repository to serve the extension charts. imageVersion: title: Image Version Not Found message: Unable to determine image version from {image}, defaulting to latest @@ -4074,7 +4074,7 @@ plugins: message: A repository with the name {repo} already exists success: title: "Imported Extension Catalog from: {name}" - message: Extension Catalog image was imported successfully + message: Extension catalog image was imported successfully headers: image: name: images @@ -4093,23 +4093,23 @@ plugins: install: label: Install title: Install Extension {name} - prompt: "Are you sure that you want to install this Extension?" + prompt: "Are you sure that you want to install this extension?" version: Version - warnNotCertified: Please ensure that you are aware of the risks of installing Extensions from untrusted authors + warnNotCertified: Please ensure that you are aware of the risks of installing extensions from untrusted authors update: label: Update title: Update Extension {name} - prompt: "Are you sure that you want to update this Extension?" + prompt: "Are you sure that you want to update this extension?" rollback: label: Rollback title: Rollback Extension {name} - prompt: "Are you sure that you want to rollback this Extension?" + prompt: "Are you sure that you want to rollback this extension?" uninstall: label: Uninstall title: "Uninstall Extension: {name}" - prompt: "Are you sure that you want to uninstall this Extension?" - custom: "Are you sure that you want to uninstall this Extension Image? This will also remove any Extensions provided by this image." - upgradeAvailable: A newer version of this Extension is available + prompt: "Are you sure that you want to uninstall this extension?" + custom: "Are you sure that you want to uninstall this extension image? This will also remove any extensions provided by this image." + upgradeAvailable: A newer version of this extension is available reload: Extensions changed - reload required safeMode: title: Extensions Safe Mode @@ -4118,19 +4118,19 @@ plugins: title: Extension support is not enabled prompt: cant: Automatic installation is not available - required Helm Charts could not be found - can: You need to install the Extension Operator + can: You need to install the extension operator install: title: Enable Extension Support? - prompt: This will install the Helm charts to enable Extension support - airgap: The Rancher Extensions Repository provides extensions published by Rancher. Un-check if your Rancher installation is air-gapped + prompt: This will install the Helm charts to enable extension support + airgap: The Rancher extensions repository provides extensions published by Rancher. De-select if your Rancher installation is air-gapped addRancherRepo: Add the Rancher Extension Repository remove: label: Disable Extension Support title: Disable Extension Support? - prompt: This will un-install the Helm charts that enable Extension support + prompt: This will un-install the Helm charts that enable extension support registry: title: Remove the Rancher Extensions Repository - prompt: The Rancher Extensions Repository provides extensions published by Rancher + prompt: The Rancher extension repository provides extensions published by Rancher crd: title: Remove the Rancher Extensions Custom Resource Definition prompt: There are one or more extensions installed - removing the CRD will require you to manually reinstall these extensions if you subsequently re-enable extensions support. 
@@ -4153,7 +4153,7 @@ podSecurityAdmission: placeholder: 'Version (default: latest)' exemptions: title: Exemptions - description: Allow the creation of pods for specific Usernames, RuntimeClassNames, and Namespaces that would otherwise be prohibited due to the policies set above. + description: Allow the creation of pods for specific usernames, RuntimeClassNames, and namespaces that would otherwise be prohibited due to the policies set above. placeholder: Enter a comma separated list of {psaExemptionsControl} prefs: title: Preferences @@ -4191,9 +4191,9 @@ prefs: advFeatures: title: Advanced Features viewInApi: Enable "View in API" - allNamespaces: Show system Namespaces managed by Rancher (not intended for editing or deletion) - themeShortcut: Enable Dark/Light Theme keyboard shortcut toggle (shift+T) - pluginDeveloper: Enable Extension developer features + allNamespaces: Show system namespaces managed by Rancher (not intended for editing or deletion) + themeShortcut: Enable Dark/Light theme keyboard shortcut toggle (shift+T) + pluginDeveloper: Enable extension developer features hideDesc: label: Hide All Type Descriptions helm: @@ -4249,9 +4249,9 @@ project: members: label: Members containerDefaultResourceLimit: Container Default Resource Limit - vmDefaultResourceLimit: VM Default Resource Limit + vmDefaultResourceLimit: Virtual Machine Default Resource Limit resourceQuotas: Resource Quotas - haveOneOwner: There must be at least one member with the Owner role. + haveOneOwner: There must be at least one member with the owner role. psp: default: Cluster Default label: Pod Security Policy @@ -4263,23 +4263,23 @@ projectMembers: label: Project projectPermissions: label: Project Permissions - description: Controls what access users have to the Project + description: Controls what access users have to the project noDescription: User created - no description searchForMember: Search for a member to provide project access owner: label: Owner - description: Owners have full control over the Project and all resources inside it. + description: Owners have full control over the project and all resources inside it. member: label: Member - description: Members can manage the resources inside the Project but not change the Project itself. + description: Members can manage the resources inside the project but not change the project itself. readOnly: label: Read Only - description: Members can only view the resources inside the Project but not change the resources. + description: Members can only view the resources inside the project but not change the resources. custom: label: Custom description: Choose individual roles for this user. createNs: Create Namespaces - configmapsManage: Manage Config Maps + configmapsManage: Manage Configuration Maps ingressManage: Manage Ingress projectcatalogsManage: Manage Project Catalogs projectroletemplatebindingsManage: Manage Project Members @@ -4324,7 +4324,7 @@ prometheusRule: summary: input: Summary Annotation Value label: Summary - bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured Receivers.' + bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured receivers.' for: label: Wait to fire for placeholder: '60' @@ -4363,14 +4363,14 @@ prometheusRule: promptForceRemove: modalTitle: Are you sure? 
- removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. It's highly recommended to manually delete any referenced infrastructure." + removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. We recommend manually deleting any referenced infrastructure." forceDelete: Force Delete confirmName: "Enter in the pool name below to confirm:" podRemoveWarning: "Force deleting pods does not wait for confirmation that the pod's processes have been terminated. This may result in data corruption or inconsistencies" promptScaleMachineDown: attemptingToRemove: "You are attempting to delete {count} {type}" - retainedMachine1: At least one Machine must exist for roles Control Plane and Etcd. + retainedMachine1: At least one machine must exist for roles control plane and etcd. retainedMachine2: { name } will remain promptRemove: andOthers: {count, plural, =0 { } =1 { and one other.} other { and {count} others.} } attemptingToRemove: "You are attempting to delete the {type}" - attemptingToRemoveAuthConfig: "You are attempting to disable this Auth Provider.

Be aware that cluster role template bindings, project role template bindings, global role bindings, users, tokens will be all deleted.

Are you sure you want to proceed?" + attemptingToRemoveAuthConfig: "You are attempting to disable this authentication provider.

Be aware that cluster role template bindings, project role template bindings, global role bindings, users, and tokens will all be deleted.

Are you sure you want to proceed?" protip: "Tip: Hold the {alternateLabel} key while clicking delete to bypass this confirmation" confirmName: "Enter {nameToMatch} below to confirm:" deleteAssociatedNamespaces: "Also delete the namespaces in this project:" @@ -4424,7 +4424,7 @@ promptSaveAsRKETemplate: promptRotateEncryptionKey: title: Rotate Encryption Keys description: The last backup {name} was performed on {date} - warning: Before proceeding, ensure a successful ETCD backup of the cluster has been completed. + warning: Before proceeding, ensure a successful etcd backup of the cluster has been completed. error: No backup found rancherAlertingDrivers: @@ -4524,10 +4524,10 @@ rbac: description: Administrators have full control over the entire installation and all resources in all clusters. restricted-admin: label: Restricted Administrator - description: Restricted Admins have full control over all resources in all downstream clusters but no access to the local cluster. + description: Restricted administrators have full control over all resources in all downstream clusters but no access to the local cluster. user: label: Standard User - description: Standard Users can create new clusters and manage clusters and projects they have been granted access to. + description: Standard users can create new clusters and manage clusters and projects they have been granted access to. user-base: label: User-Base description: User-Base users have login-access only. @@ -4539,10 +4539,10 @@ rbac: description: Allows the user to create new RKE cluster templates and become the owner of them. authn-manage: label: Configure Authentication - description: Allows the user to enable, configure, and disable all Authentication provider settings. + description: Allows the user to enable, configure, and disable all authentication provider settings. catalogs-manage: label: Legacy Configure Catalogs - description: Allows the user to add, edit, and remove management.cattle.io based catalogs resources. + description: Allows the user to add, edit, and remove management.cattle.io-based catalog resources. clusters-manage: label: Manage all Clusters description: Allows the user to manage all clusters, including ones they are not a member of. @@ -4557,31 +4557,31 @@ rbac: description: Allows the user to enable and disable custom features via feature flag settings. nodedrivers-manage: label: Configure Node Drivers - description: Allows the user to enable, configure, and remove all Node Driver settings. + description: Allows the user to enable, configure, and remove all node driver settings. nodetemplates-manage: label: Manage Node Templates - description: Allows the user to define, edit, and remove Node Templates. + description: Allows the user to define, edit, and remove node templates. podsecuritypolicytemplates-manage: label: Manage Pod Security Policies (PSPs) description: Allows the user to define, edit, and remove PSPs. roles-manage: label: Manage Roles - description: Allows the user to define, edit, and remove Role definitions. + description: Allows the user to define, edit, and remove role definitions. settings-manage: label: Manage Settings description: 'Allows the user to manage {vendor} Settings.' users-manage: label: Manage Users - description: Allows the user to create, remove, and set passwords for all Users. + description: Allows the user to create, remove, and set passwords for all users. catalogs-use: label: Use Catalogs - description: Allows the user to see and deploy Templates from the Catalog. 
Standard Users have this permission by default. + description: Allows the user to see and deploy templates from the catalog. Standard users have this permission by default. nodetemplates-use: label: Use Node Templates - description: Allows the user to deploy new Nodes using any existing Node Templates. + description: Allows the user to deploy new nodes using any existing node templates. view-rancher-metrics: label: 'View {vendor} Metrics' - description: Allows the user to view Metrics through the API. + description: Allows the user to view metrics through the API. base: label: Login Access clustertemplaterevisions-create: @@ -4622,8 +4622,8 @@ resourceDetail: age: Age restartCount: Pod Restarts defaultBannerMessage: - error: This resource is currently in an error state, but there isn't a detailed message available. - transitioning: This resource is currently in a transitioning state, but there isn't a detailed message available. + error: This resource is currently in an error state, but a detailed message is not available. + transitioning: This resource is currently in a transitioning state, but a detailed message is not available. sensitive: hide: Hide Sensitive Values show: Show Sensitive Values @@ -4637,7 +4637,7 @@ resourceDetail: managedWarning: |- This {type} is managed by {hasName, select, no {a {managedBy} app} - yes {the {managedBy} app {appName}}}; changes made here will likely be overwritten the next time {managedBy} runs. + yes {the {managedBy} app {appName}}}; changes made here can be overwritten the next time {managedBy} runs. resourceList: head: create: Create @@ -4685,7 +4685,7 @@ resourceTabs: resourceYaml: errors: - namespaceRequired: This resource is namespaced, so a namespace must be provided. + namespaceRequired: This resource is namespaced; a namespace must be provided. buttons: continue: Continue Editing edit: Edit YAML @@ -4756,12 +4756,12 @@ secret: relatedWorkloads: Related Workloads typeDescriptions: custom: - description: Create a Secret with a custom type + description: Create a secret with a custom type 'kubernetes.io/basic-auth': description: 'Authentication with a username and password' docLink: https://kubernetes.io/docs/concepts/configuration/secret/#basic-authentication-secret 'Opaque': - description: Default type of Secret using key-value pairs + description: Default type of secret using key-value pairs docLink: https://kubernetes.io/docs/concepts/configuration/secret/#opaque-secrets 'kubernetes.io/dockerconfigjson': description: Authenticated registry for pulling container images @@ -4830,9 +4830,9 @@ serviceTypes: nodeport: Node Port servicesPage: - serviceListDescription: Services allow you to define a logical set of Pods that can be accessed with a single IP address and port. - targetPorts: The Service will send requests to this port, and the selected Pods are expected to listen on this port. - listeningPorts: The Service is exposed on this port. + serviceListDescription: Services allow you to define a logical set of pods that can be accessed with a single IP address and port. + targetPorts: The service will send requests to this port, and the selected pods are expected to listen on this port. + listeningPorts: The service is exposed on this port. anyNode: Any Node labelsAnnotations: label: Labels & Annotations @@ -4847,7 +4847,7 @@ servicesPage: placeholder: e.g. 10800 externalName: define: External Name - helpText: "External Name is intended to specify a canonical DNS name. This is a required field. To hardcode an IP address, use a Headless service." 
+ helpText: "External name is intended to specify a canonical DNS name. This is a required field. To hardcode an IP address, use a headless service." label: External Name placeholder: e.g. my.database.example.com input: @@ -4892,7 +4892,7 @@ servicesPage: serviceTypes: clusterIp: abbrv: IP - description: Expose a set of Pods to other Pods within the cluster. This type of Service is only reachable from within the cluster. This is the default type. + description: Expose a set of pods to other pods within the cluster. This type of service is only reachable from within the cluster. This is the default type. label: Cluster IP externalName: abbrv: EN @@ -4917,7 +4917,7 @@ setup: currentPassword: Bootstrap Password confirmPassword: Confirm New Password defaultPassword: - intro: It looks like this is your first time visiting {vendor}; if you pre-set your own bootstrap password, enter it here. Otherwise a random one has been generated for you. To find it:

+ intro: It looks like this is your first time visiting {vendor}; if you have pre-set your own bootstrap password, enter it here. Otherwise a random one has been generated for you. To find it:

dockerPrefix: 'For a "docker run" installation:' dockerPs: 'Find your container ID with docker ps, then run:' dockerSuffix: "" @@ -5277,7 +5277,7 @@ tableHeaders: apiGroup: API Groups apikey: API Key available: Available - attachedVM: Attached VM + attachedVM: Attached Virtual Machine authRoles: globalDefault: New User Default @@ -5380,7 +5380,7 @@ tableHeaders: namespaceName: Name namespaceNameUnlinked: Name networkType: Type - networkVlan: Vlan ID + networkVlan: VLAN ID node: Node nodeName: Node Name nodesReady: Nodes Ready @@ -5548,7 +5548,7 @@ validation: name: Cluster name cannot be 'local' or take the form 'c-xxxxx' conflict: |- This resource has been modified since you started editing it, and some of those modifications conflict with your changes. - This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then Save again. + This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then save again. Conflicting {fieldCount, plural, =1 {field} other {fields}}: {fields} custom: missing: 'No validator exists for { validatorName }! Does the validator exist in custom-validators? Is the name spelled correctly?' @@ -5577,7 +5577,7 @@ validation: global: Requires "Cluster Output" to be selected. output: logdna: - apiKey: Required an "Api Key" to be set. + apiKey: Requires an "API Key" to be set. invalidCron: Invalid cron schedule k8s: name: Must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc'). @@ -5617,17 +5617,17 @@ validation: port: A port must be a number between 1 and 65535. path: '"{key}" must be an absolute path' prometheusRule: - noEdit: This Prometheus Rule may not be edited due to invalid characters in name. + noEdit: This Prometheus rule may not be edited due to invalid characters in its name. groups: required: At least one rule group is required. singleAlert: A rule may contain alert rules or recording rules but not both. valid: name: 'Name is required for rule group {index}.' rule: - alertName: 'Rule group {groupIndex} rule {ruleIndex} requires a Alert Name.' - expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL Expression.' + alertName: 'Rule group {groupIndex} rule {ruleIndex} requires an alert name.' + expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL expression.' labels: 'Rule group {groupIndex} rule {ruleIndex} requires at least one label. Severity is recommended.' - recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a Time Series Name.' + recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a time series name.' singleEntry: 'At least one alert rule or one recording rule is required in rule group {index}.' 
required: '"{key}" is required' invalid: '"{key}" is invalid' @@ -5636,12 +5636,12 @@ validation: roleTemplate: roleTemplateRules: missingVerb: You must specify at least one verb for each resource grant - missingResource: You must specify a Resource for each resource grant - missingApiGroup: You must specify an API Group for each resource grant - missingOneResource: You must specify at least one Resource, Non-Resource URL or API Group for each resource grant + missingResource: You must specify a resource for each resource grant + missingApiGroup: You must specify an API group for each resource grant + missingOneResource: You must specify at least one resource, non-resource URL or API group for each resource grant service: externalName: - none: External Name is required on an ExternalName Service. + none: External name is required on an ExternalName service. ports: name: required: 'Port Rule [{position}] - Name is required.' @@ -5669,7 +5669,7 @@ validation: missingProjectId: A target must have a project selected. monitoring: route: - match: At least one Match or Match Regex must be selected + match: At least one match or match regex must be selected interval: '"{key}" must be of a format with digits followed by a unit i.e. 1h, 2m, 30s' tab: "One or more fields in this tab contain a form validation error" @@ -5761,9 +5761,9 @@ workload: initialDelay: Initial Delay livenessProbe: Liveness Check livenessTip: Containers will be restarted when this check is failing. Not recommended for most uses. - noHealthCheck: "There is not a Readiness Check, Liveness Check or Startup Check configured." + noHealthCheck: "There is not a readiness check, liveness check or startup check configured." readinessProbe: Readiness Checks - readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended. + readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended. startupProbe: Startup Check startupTip: Containers will wait until this check succeeds before attempting other health checks. successThreshold: Success Threshold @@ -5829,9 +5829,9 @@ workload: noServiceAccess: You do not have permission to create or manage services ports: expose: Networking - description: 'Define a Service to expose the container, or define a non-functional, named port so that humans will know where the app within the container is expected to run.' - detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a Service is automatically created that will select the Pods in this workload using labels. - toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on Services. You can also manually create a Service to expose Pods by selecting their labels, and you can use an Ingress to map HTTP routes to Services.' + description: 'Define a service to expose the container, or define a non-functional, named port so that other users will know where the application within the container is expected to run.' + detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a service is automatically created that will select the pods in this workload using labels. + toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on services. You can also manually create a service to expose pods by selecting their labels, and you can use an ingress to map HTTP routes to services.' 
createService: Service Type noCreateService: Do not create a service containerPort: Private Container Port @@ -5905,13 +5905,13 @@ workload: detail: services: Services ingresses: Ingresses - cannotViewServices: Could not list Services due to lack of permission. - cannotFindServices: Could not find any Services that select Pods from this workload. - serviceListCaption: "The following Services select Pods from this workload:" - cannotViewIngresses: Could not list Ingresses due to lack of permission. - cannotFindIngresses: Could not find any Ingresses that forward traffic to Services that select Pods in this workload. - ingressListCaption: "The following Ingresses forward traffic to Services that select Pods from this workload:" - cannotViewIngressesBecauseCannotViewServices: Could not find relevant relevant Ingresses due to lack of permission to view Services. + cannotViewServices: Could not list services due to lack of permission. + cannotFindServices: Could not find any services that select pods from this workload. + serviceListCaption: "The following services select pods from this workload:" + cannotViewIngresses: Could not list ingresses due to lack of permission. + cannotFindIngresses: Could not find any ingresses that forward traffic to services that select pods in this workload. + ingressListCaption: "The following ingresses forward traffic to services that select pods from this workload:" + cannotViewIngressesBecauseCannotViewServices: Could not find relevant ingresses due to lack of permission to view services. pods: title: Pods detailTop: @@ -6100,7 +6100,7 @@ workload: addMount: Add Mount addVolume: Add Volume selectVolume: Select Volume - noVolumes: Volumes will appear here after they are added in the Pod tab + noVolumes: Volumes will appear here after they are added in the pod tab certificate: Certificate csi: diskName: Disk Name @@ -6131,12 +6131,12 @@ workload: defaultMode: Default Mode driver: driver hostPath: - label: The Path on the Node must be + label: The path on the node must be options: default: 'Anything: do not check the target path' - directoryOrCreate: A directory, or create if it doesn't exist + directoryOrCreate: A directory, or create if it does not exist directory: An existing directory - fileOrCreate: A file, or create if it doesn't exist + fileOrCreate: A file, or create if it does not exist file: An existing file socket: An existing socket charDevice: An existing character device @@ -6165,11 +6165,11 @@ workload: placeholder: "e.g. 300" typeDescriptions: apps.daemonset: DaemonSets run exactly one pod on every eligible node. When new nodes are added to the cluster, DaemonSets automatically deploy to them. Recommended for system-wide or vertically-scalable workloads that never need more than one pod per node. - apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless & horizontally-scalable workloads. + apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless and horizontally-scalable workloads. apps.statefulset: StatefulSets manage stateful applications and provide guarantees about the ordering and uniqueness of the pods created. 
Recommended for workloads with persistent storage or strict identity, quorum, or upgrade order requirements. - batch.cronjob: CronJobs create Jobs, which then run Pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC). + batch.cronjob: CronJobs create jobs, which then run pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC). batch.job: Jobs create one or more pods to reliably perform a one-time task by running a pod until it exits successfully. Failed pods are automatically replaced until the specified number of completed runs has been reached. Jobs can also run multiple pods in parallel or function as a batch work queue. - pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A Pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers. + pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers. upgrading: activeDeadlineSeconds: label: Pod Active Deadline @@ -6178,8 +6178,8 @@ workload: label: Concurrency options: allow: Allow CronJobs to run concurrently - forbid: Skip next run if current run hasn't finished - replace: Replace run if current run hasn't finished + forbid: Skip next run if current run has not finished + replace: Replace run if current run has not finished maxSurge: label: Max Surge tip: The maximum number of pods allowed beyond the desired scale at any given time. @@ -6201,7 +6201,7 @@ workload: labels: delete: "On Delete: New pods are only created when old pods are manually deleted." recreate: "Recreate: Kill ALL pods, then start new pods." - rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Don't stop more pods than max unavailable." + rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Do not stop more pods than max unavailable." terminationGracePeriodSeconds: label: Termination Grace Period tip: The duration the pod needs to terminate successfully. @@ -6298,24 +6298,24 @@ typeDescription: cis.cattle.io.clusterscanprofile: A profile is the configuration for the CIS scan, which is the benchmark versions to use and any specific tests to skip in that benchmark. cis.cattle.io.clusterscan: A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed. cis.cattle.io.clusterscanreport: A report is the result of a CIS scan of the cluster. - management.cattle.io.feature: Feature Flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality. - cluster.x-k8s.io.machine: A Machine encapsulates the configuration of a Kubernetes Node. Use this view to see what happens after updating a cluster. - cluster.x-k8s.io.machinedeployment: A Machine Deployment orchestrates deployments via templates over a collection of Machine Sets (similar to a Deployment). Use this view to see what happens after updating a cluster. 
- cluster.x-k8s.io.machineset: A Machine Set ensures the desired number of Machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster. + management.cattle.io.feature: Feature flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality. + cluster.x-k8s.io.machine: A machine encapsulates the configuration of a Kubernetes node. Use this view to see what happens after updating a cluster. + cluster.x-k8s.io.machinedeployment: A machine deployment orchestrates deployments via templates over a collection of machine sets (similar to a deployment). Use this view to see what happens after updating a cluster. + cluster.x-k8s.io.machineset: A machine set ensures the desired number of machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster. resources.cattle.io.backup: A backup is created to perform one-time backups or schedule recurring backups based on a ResourceSet. resources.cattle.io.restore: A restore is created to trigger a restore to the cluster based on a backup file. resources.cattle.io.resourceset: A resource set defines which CRDs and resources to store in the backup. monitoring.coreos.com.servicemonitor: A service monitor defines the group of services and the endpoints that Prometheus will scrape for metrics. This is the most common way to define metrics collection. - monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor wouldn't work. - monitoring.coreos.com.prometheusrule: A Prometheus Rule resource defines both recording and/or alert rules. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager. + monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor would not work. + monitoring.coreos.com.prometheusrule: A Prometheus rule resource defines recording rules, alert rules, or both. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager. monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration. monitoring.coreos.com.alertmanager: An alert manager is deployment whose configuration will be specified by a secret in the same namespace, which determines which alerts should go to which receiver. - node: The base Kubernetes Node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management. + node: The base Kubernetes node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management. catalog.cattle.io.clusterrepo: 'A chart repository is a Helm repository or {vendor} git based application catalog. 
It provides the list of available charts in the cluster.' - catalog.cattle.io.clusterrepo.local: ' A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.' + catalog.cattle.io.clusterrepo.local: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.' catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster. catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI. - logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected Cluster Output. + logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected cluster output. logging.banzaicloud.io.clusteroutput: A cluster output defines which logging providers that logs can be sent to and is only effective when deployed in the namespace that the logging operator is in. logging.banzaicloud.io.flow: A flow defines which logs to collect and filter as well as which output to send the logs. The flow is a namespaced resource, which means logs will only be collected from the namespace that the flow is deployed in. logging.banzaicloud.io.output: An output defines which logging providers that logs can be sent to. The output needs to be in the same namespace as the flow that is using it. @@ -6349,8 +6349,8 @@ typeLabel: } catalog.cattle.io.app: |- {count, plural, - one { Installed App } - other { Installed Apps } + one { Installed Application } + other { Installed Applications } } catalog.cattle.io.clusterrepo: |- {count, plural, @@ -6359,18 +6359,18 @@ typeLabel: } catalog.cattle.io.repo: |- {count, plural, - one { Namespaced Repo } - other { Namespaced Repos } + one { Namespaced Repository } + other { Namespaced Repositories } } chartinstallaction: |- {count, plural, - one { App } - other { Apps } + one { Application } + other { Applications } } chartupgradeaction: |- {count, plural, - one { App } - other { Apps } + one { Application } + other { Applications } } cloudcredential: |- {count, plural, @@ -6394,8 +6394,8 @@ typeLabel: } fleet.cattle.io.gitrepo: |- {count, plural, - one { Git Repo } - other {Git Repos } + one { Git Repository } + other {Git Repositories } } management.cattle.io.authconfig: |- {count, plural, @@ -6500,8 +6500,8 @@ typeLabel: } 'management.cattle.io.cluster': |- {count, plural, - one { Mgmt Cluster } - other { Mgmt Clusters } + one { Management Cluster } + other { Management Clusters } } 'cluster.x-k8s.io.cluster': |- {count, plural, @@ -6680,8 +6680,8 @@ typeLabel: } harvesterhci.io.cloudtemplate: |- {count, plural, - one { Cloud Config Template } - other { Cloud Config Templates } + one { Cloud Configuration Template } + other { Cloud Configuration Templates } } fleet.cattle.io.content: |- {count, plural, @@ -6700,8 +6700,8 @@ typeLabel: } k3s.cattle.io.addon: |- {count, plural, - one { Addon } - other { Addons } + one { Add-on } + other { Add-ons } } management.cattle.io.apiservice: |- {count, plural, @@ -6920,7 +6920,7 @@ keyValue: registryMirror: header: Mirrors - toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. 
For example docker.io could redirect to your internal registry instead of ever going to DockerHub.' + toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.' addLabel: Add Mirror description: Mirrors define the names and endpoints for private registries. The endpoints are tried one by one, and the first working one is used. hostnameLabel: Registry Hostname @@ -6968,12 +6968,12 @@ advancedSettings: 'cluster-defaults': 'Override RKE Defaults when creating new clusters.' 'engine-install-url': 'Default Docker engine installation URL (for most node drivers).' 'engine-iso-url': 'Default OS installation URL (for vSphere driver).' - 'engine-newest-version': 'The newest supported version of Docker at the time of this release. A Docker version that does not satisfy supported docker range but is newer than this will be marked as untested.' - 'engine-supported-range': 'Semver range for supported Docker engine versions. Versions which do not satisfy this range will be marked unsupported in the UI.' - 'ingress-ip-domain': 'Wildcard DNS domain to use for automatically generated Ingress hostnames. .. will be added to the domain.' + 'engine-newest-version': 'The newest supported version of Docker at the time of this release. A Docker version that does not satisfy supported docker range but is newer than this will be marked as untested.' + 'engine-supported-range': 'Semver range for supported Docker engine versions. Versions which do not satisfy this range will be marked unsupported in the UI.' + 'ingress-ip-domain': 'Wildcard DNS domain to use for automatically generated ingress hostnames. .. will be added to the domain.' 'server-url': 'Default {appName} install url. Must be HTTPS. All nodes in your cluster must be able to reach this.' - 'system-default-registry': 'Private registry to be used for all Rancher System Container Images. If no value is specified, the default registry for the container runtime is used. For Docker and containerd, the default is `docker.io`.' - 'ui-index': 'HTML index location for the Cluster Manager UI.' + 'system-default-registry': 'Private registry to be used for all Rancher system container images. If no value is specified, the default registry for the container runtime is used. For Docker and containerd, the default is `docker.io`.' + 'ui-index': 'HTML index location for the cluster manager UI.' 'ui-dashboard-index': 'HTML index location for the {appName} UI.' 'ui-offline-preferred': 'Controls whether UI assets are served locally by the server container or from the remote URL defined in the ui-index and ui-dashboard-index settings. The `Dynamic` option will use local assets in production builds of {appName}.' 'ui-pl': 'Private-Label company name.' @@ -7038,19 +7038,19 @@ performance: label: Incremental Loading setting: You can configure the threshold above which incremental loading will be used. description: |- - When enabled, resources will appear more quickly, but it may take slightly longer to load the entire set of resources. This setting only applies to resources that come from the Kubernetes API + When enabled, resources will appear more quickly, but it may take slightly longer to load the entire set of resources. This setting only applies to resources that come from the Kubernetes API. 
checkboxLabel: Enable incremental loading inputLabel: Resource Threshold - incompatibleDescription: "Incremental Loading is incomaptible with Namespace/Project filtering. Enabling this will disable it." + incompatibleDescription: "Incremental Loading is incompatible with namespace or project filtering. Enabling this will disable it." manualRefresh: label: Manual Refresh setting: You can configure a threshold above which manual refresh will be enabled. buttonTooltip: Refresh list description: |- - When enabled, list data will not auto-update but instead the user must manually trigger a list-view refresh. This setting only applies to resources that come from the Kubernetes API + When enabled, list data will not auto-update but instead the user must manually trigger a list-view refresh. This setting only applies to resources that come from the Kubernetes API. checkboxLabel: Enable manual refresh of data for lists inputLabel: Resource Threshold - incompatibleDescription: "Manual Refresh is incomaptible with Namespace/Project filtering. Enabling this will disable it." + incompatibleDescription: "Manual Refresh is incompatible with namespace or project filtering. Enabling this will disable it." websocketNotification: label: Websocket Notifications description: |- @@ -7058,7 +7058,7 @@ performance: checkboxLabel: Disable websocket notifications gc: label: Resource Garbage Collection - description: The UI will cache kuberentes resources locally to avoid having to re-fetch them. In some cases this can lead to a large amount of data stored in the browser. Enable this setting to periodically remove them. + description: The UI will cache Kubernetes resources locally to avoid having to re-fetch them. In some cases, this can lead to a large amount of data stored in the browser. Enable this setting to periodically remove them. checkboxLabel: Enable Garbage Collection whenRun: description: Update when garbage collection runs @@ -7072,25 +7072,25 @@ performance: howRun: description: Update how garbage collection runs age: - description: "Resource types musn't have been accessed within this period to be considered for garbage collection." + description: "Resource types must not have been accessed within this period to be considered for garbage collection." inputLabel: Resource Age count: description: Resource types must exceed this amount to be considered for garbage collection. inputLabel: Resource Count nsFiltering: label: Require Namespace / Project Filtering - description: Require the user to select namespaces and/or projects. This restricts the number of resources fetched when viewing lists and should help the responsiveness of the UI in systems with a lot of resources. - checkboxLabel: Enable Required Namespace / Project Filtering - incompatibleDescription: "Required Namespace / Project Filtering is incomaptible with Manual Refresh and Incremental Loading. Enabling this will disable them." + description: Require the user to select namespaces or projects. This restricts the number of resources fetched when viewing lists and should help the responsiveness of the UI in systems with a lot of resources. + checkboxLabel: Enable Required Namespace or Project Filtering + incompatibleDescription: "Required namespace or project filtering is incompatible with manual refresh and incremental loading. Enabling this will disable them." advancedWorker: label: Websocket Web Worker - description: Updates to resources pushed to the UI come via WebSocket and are handled in the UI thread. 
Enable this option to handle cluster WebSocket updates in a Web Worker in a separate thread. This should help the responsiveness of the UI in systems where resources change often. + description: Updates to resources pushed to the UI come via WebSocket and are handled in the UI thread. Enable this option to handle cluster WebSocket updates in a web worker in a separate thread. This should help the responsiveness of the UI in systems where resources change often. checkboxLabel: Enable Advanced Websocket Web Worker inactivity: title: Inactivity checkboxLabel: Enable inactivity session expiration inputLabel: Inactivity timeout (minutes) - information: To change the automatic logout behaviour, edit the authorisation and/or session token timeout values (auth-user-session-ttl-minutes and auth-token-max-ttl-minutes) in the Settings page. + information: To change the automatic logout behaviour, edit the authorisation and session token timeout values (auth-user-session-ttl-minutes and auth-token-max-ttl-minutes) in the settings page. description: When enabled and the user is inactive past the specified timeout, the UI will no longer fresh page content and the user must reload the page to continue. authUserTTL: This timeout cannot be higher than the user session timeout auth-user-session-ttl-minutes, which is currently {current} minutes. @@ -7261,8 +7261,8 @@ support: text: Login to SUSE Customer Center to access support for your subscription action: SUSE Customer Center aws: - generateConfig: Generate Support Config - text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support config file below.' + generateConfig: Generate Support Configuration + text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support configuration file below.' promos: one: title: 24x7 Support @@ -7307,7 +7307,7 @@ legacy: project: label: Project - select: "Use the Project/Namespace filter at the top of the page to select a Project in order to see legacy Project features." + select: "Use the namespace or project filter at the top of the page to select a project in order to see legacy project features." serverUpgrade: title: "{vendor} Server Changed"