diff --git a/deploy/charts/harvester/Chart.lock b/deploy/charts/harvester/Chart.lock index fd8511243f7..b7690359d9f 100644 --- a/deploy/charts/harvester/Chart.lock +++ b/deploy/charts/harvester/Chart.lock @@ -15,8 +15,8 @@ dependencies: repository: file://dependency_charts/csi-snapshotter version: 0.2.0 - name: longhorn - repository: https://charts.longhorn.io - version: 1.6.0 + repository: file://dependency_charts/longhorn + version: 1.6.0-dev - name: kube-vip repository: https://kube-vip.github.io/helm-charts version: 0.5.0 @@ -32,5 +32,5 @@ dependencies: - name: snapshot-validation-webhook repository: file://dependency_charts/snapshot-validation-webhook version: 0.2.0 -digest: sha256:3eb1b33ce3c8d4aaeea7ff0f78a3b88c036cf919e8356237dffc5cc0fe09048e -generated: "2024-04-19T12:59:09.937769+08:00" +digest: sha256:581058e681e6f1490011160bf9344fc9cba7e2c920c7d7267cc816b029d2a00d +generated: "2024-04-23T10:12:28.192734931+10:00" diff --git a/deploy/charts/harvester/Chart.yaml b/deploy/charts/harvester/Chart.yaml index c02a0627c9b..f11a8d8c96a 100644 --- a/deploy/charts/harvester/Chart.yaml +++ b/deploy/charts/harvester/Chart.yaml @@ -39,8 +39,9 @@ dependencies: repository: file://dependency_charts/csi-snapshotter condition: csi-snapshotter.enabled - name: longhorn - version: 1.6.0 - repository: https://charts.longhorn.io + version: 1.6.0-dev + #repository: https://charts.longhorn.io + repository: file://dependency_charts/longhorn condition: longhorn.enabled - name: kube-vip version: 0.5.0 diff --git a/deploy/charts/harvester/charts/longhorn-1.6.0-dev.tgz b/deploy/charts/harvester/charts/longhorn-1.6.0-dev.tgz new file mode 100644 index 00000000000..4224a2cfa87 Binary files /dev/null and b/deploy/charts/harvester/charts/longhorn-1.6.0-dev.tgz differ diff --git a/deploy/charts/harvester/charts/longhorn-1.6.0.tgz b/deploy/charts/harvester/charts/longhorn-1.6.0.tgz deleted file mode 100644 index 375e89043ca..00000000000 Binary files a/deploy/charts/harvester/charts/longhorn-1.6.0.tgz and /dev/null differ diff --git a/deploy/charts/harvester/dependency_charts/longhorn/.helmignore b/deploy/charts/harvester/dependency_charts/longhorn/.helmignore new file mode 100644 index 00000000000..f0c13194444 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/deploy/charts/harvester/dependency_charts/longhorn/Chart.yaml b/deploy/charts/harvester/dependency_charts/longhorn/Chart.yaml new file mode 100644 index 00000000000..8d071db4e03 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +name: longhorn +version: 1.6.0-dev +appVersion: v1.6.0-dev +kubeVersion: ">=1.21.0-0" +description: Longhorn is a distributed block storage system for Kubernetes. 
+keywords: +- longhorn +- storage +- distributed +- block +- device +- iscsi +- nfs +home: https://github.com/longhorn/longhorn +sources: +- https://github.com/longhorn/longhorn +- https://github.com/longhorn/longhorn-engine +- https://github.com/longhorn/longhorn-instance-manager +- https://github.com/longhorn/longhorn-share-manager +- https://github.com/longhorn/longhorn-manager +- https://github.com/longhorn/longhorn-ui +- https://github.com/longhorn/longhorn-tests +- https://github.com/longhorn/backing-image-manager +maintainers: +- name: Longhorn maintainers + email: maintainers@longhorn.io +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png diff --git a/deploy/charts/harvester/dependency_charts/longhorn/README.md b/deploy/charts/harvester/dependency_charts/longhorn/README.md new file mode 100644 index 00000000000..cef77222915 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/README.md @@ -0,0 +1,329 @@ +# Longhorn Chart + +> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only. + +> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/). + +## Source Code + +Longhorn is 100% open source software. Project source code is spread across a number of repos: + +1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine +2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager +3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager +4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager +5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager +6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui + +## Prerequisites + +1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.) +2. Kubernetes >= v1.21 +3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. +4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`. + +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, you may have to clean up your Helm release secrets. 
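+
+If the release was installed as described in the Installation section below, a minimal in-place upgrade that disables PSPs might look like the following sketch (the release name, namespace, and repository alias are assumptions; adjust them to match your deployment):
+
+```
+# Disable the PSP resources managed by this chart before moving to Kubernetes v1.25+
+helm upgrade longhorn longhorn/longhorn --namespace longhorn-system --set enablePSP=false
+```
+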
+Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. + +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. + +## Installation + +1. Add Longhorn chart repository. +``` +helm repo add longhorn https://charts.longhorn.io +``` + +2. Update local Longhorn chart information from chart repository. +``` +helm repo update +``` + +3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart. + +``` +kubectl create namespace longhorn-system +helm install longhorn longhorn/longhorn --namespace longhorn-system +``` + +## Uninstallation + +``` +kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag +helm uninstall longhorn -n longhorn-system +kubectl delete namespace longhorn-system +``` + +## Values + +The `values.yaml` contains items used to tweak a deployment of this chart. + +### Cattle Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| global.cattle.systemDefaultRegistry | string | `""` | Default system registry. | +| global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector | string | `"kubernetes.io/os:linux"` | Node selector for system-managed Longhorn components. | +| global.cattle.windowsCluster.defaultSetting.taintToleration | string | `"cattle.io/os=linux:NoSchedule"` | Toleration for system-managed Longhorn components. | +| global.cattle.windowsCluster.enabled | bool | `false` | Setting that allows Longhorn to run on a Rancher Windows cluster. | +| global.cattle.windowsCluster.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node selector for Linux nodes that can run user-deployed Longhorn components. | +| global.cattle.windowsCluster.tolerations | list | `[{"effect":"NoSchedule","key":"cattle.io/os","operator":"Equal","value":"linux"}]` | Toleration for Linux nodes that can run user-deployed Longhorn components. | + +### Network Policies + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| networkPolicies.enabled | bool | `false` | Setting that allows you to enable network policies that control access to Longhorn pods. | +| networkPolicies.type | string | `"k3s"` | Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1") | + +### Image Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| image.csi.attacher.repository | string | `"longhornio/csi-attacher"` | Repository for the CSI attacher image. When unspecified, Longhorn uses the default value. | +| image.csi.attacher.tag | string | `"v4.4.2"` | Tag for the CSI attacher image. When unspecified, Longhorn uses the default value. | +| image.csi.livenessProbe.repository | string | `"longhornio/livenessprobe"` | Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value. | +| image.csi.livenessProbe.tag | string | `"v2.12.0"` | Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value. 
| +| image.csi.nodeDriverRegistrar.repository | string | `"longhornio/csi-node-driver-registrar"` | Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. | +| image.csi.nodeDriverRegistrar.tag | string | `"v2.9.2"` | Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. | +| image.csi.provisioner.repository | string | `"longhornio/csi-provisioner"` | Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value. | +| image.csi.provisioner.tag | string | `"v3.6.2"` | Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value. | +| image.csi.resizer.repository | string | `"longhornio/csi-resizer"` | Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value. | +| image.csi.resizer.tag | string | `"v1.9.2"` | Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value. | +| image.csi.snapshotter.repository | string | `"longhornio/csi-snapshotter"` | Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. | +| image.csi.snapshotter.tag | string | `"v6.3.2"` | Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. | +| image.longhorn.backingImageManager.repository | string | `"longhornio/backing-image-manager"` | Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value. | +| image.longhorn.backingImageManager.tag | string | `"master-head"` | Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value. | +| image.longhorn.engine.repository | string | `"longhornio/longhorn-engine"` | Repository for the Longhorn Engine image. | +| image.longhorn.engine.tag | string | `"master-head"` | Tag for the Longhorn Engine image. | +| image.longhorn.instanceManager.repository | string | `"longhornio/longhorn-instance-manager"` | Repository for the Longhorn Instance Manager image. | +| image.longhorn.instanceManager.tag | string | `"master-head"` | Tag for the Longhorn Instance Manager image. | +| image.longhorn.manager.repository | string | `"longhornio/longhorn-manager"` | Repository for the Longhorn Manager image. | +| image.longhorn.manager.tag | string | `"master-head"` | Tag for the Longhorn Manager image. | +| image.longhorn.shareManager.repository | string | `"longhornio/longhorn-share-manager"` | Repository for the Longhorn Share Manager image. | +| image.longhorn.shareManager.tag | string | `"master-head"` | Tag for the Longhorn Share Manager image. | +| image.longhorn.supportBundleKit.repository | string | `"longhornio/support-bundle-kit"` | Repository for the Longhorn Support Bundle Manager image. | +| image.longhorn.supportBundleKit.tag | string | `"v0.0.36"` | Tag for the Longhorn Support Bundle Manager image. | +| image.longhorn.ui.repository | string | `"longhornio/longhorn-ui"` | Repository for the Longhorn UI image. | +| image.longhorn.ui.tag | string | `"master-head"` | Tag for the Longhorn UI image. | +| image.openshift.oauthProxy.repository | string | `"quay.io/openshift/origin-oauth-proxy"` | Repository for the OAuth Proxy image. This setting applies only to OpenShift users. | +| image.openshift.oauthProxy.tag | float | `4.14` | Tag for the OAuth Proxy image. This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14. 
| +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI. | + +### Service Settings + +| Key | Description | +|-----|-------------| +| service.manager.nodePort | NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767. | +| service.manager.type | Service type for Longhorn Manager. | +| service.ui.nodePort | NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767. | +| service.ui.type | Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy") | + +### StorageClass Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| persistence.backingImage.dataSourceParameters | string | `nil` | Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) | +| persistence.backingImage.dataSourceType | string | `nil` | Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type. | +| persistence.backingImage.enable | bool | `false` | Setting that allows you to use a backing image in a Longhorn StorageClass. | +| persistence.backingImage.expectedChecksum | string | `nil` | Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. | +| persistence.backingImage.name | string | `nil` | Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image. | +| persistence.defaultClass | bool | `true` | Setting that allows you to specify the default Longhorn StorageClass. | +| persistence.defaultClassReplicaCount | int | `3` | Replica count of the default Longhorn StorageClass. | +| persistence.defaultDataLocality | string | `"disabled"` | Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort") | +| persistence.defaultFsType | string | `"ext4"` | Filesystem type of the default Longhorn StorageClass. | +| persistence.defaultMkfsParams | string | `""` | mkfs parameters of the default Longhorn StorageClass. | +| persistence.defaultNodeSelector.enable | bool | `false` | Setting that allows you to enable the node selector for the default Longhorn StorageClass. | +| persistence.defaultNodeSelector.selector | string | `""` | Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast") | +| persistence.migratable | bool | `false` | Setting that allows you to enable live migration of a Longhorn volume from one node to another. | +| persistence.nfsOptions | string | `""` | Set NFS mount options for Longhorn StorageClass for RWX volumes | +| persistence.reclaimPolicy | string | `"Delete"` | Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete") | +| persistence.recurringJobSelector.enable | bool | `false` | Setting that allows you to enable the recurring job selector for a Longhorn StorageClass. 
| +| persistence.recurringJobSelector.jobList | list | `[]` | Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`) | +| persistence.removeSnapshotsDuringFilesystemTrim | string | `"ignored"` | Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: "ignored", "enabled", "disabled") | + +### CSI Settings + +| Key | Description | +|-----|-------------| +| csi.attacherReplicaCount | Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3"). | +| csi.kubeletRootDir | kubelet root directory. When unspecified, Longhorn uses the default value. | +| csi.provisionerReplicaCount | Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3"). | +| csi.resizerReplicaCount | Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3"). | +| csi.snapshotterReplicaCount | Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3"). | + +### Longhorn Manager Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| longhornManager.log.format | string | `"plain"` | Format of Longhorn Manager logs. (Options: "plain", "json") | +| longhornManager.nodeSelector | object | `{}` | Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager. | +| longhornManager.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Manager. | +| longhornManager.serviceAnnotations | object | `{}` | Annotation for the Longhorn Manager service. | +| longhornManager.tolerations | list | `[]` | Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager. | + +### Longhorn Driver Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| longhornDriver.nodeSelector | object | `{}` | Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver. | +| longhornDriver.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn Driver. | +| longhornDriver.tolerations | list | `[]` | Toleration for Longhorn Driver on nodes allowed to run Longhorn components. | + +### Longhorn UI Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| longhornUI.nodeSelector | object | `{}` | Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI. | +| longhornUI.priorityClass | string | `"longhorn-critical"` | PriorityClass for Longhorn UI. 
| +| longhornUI.replicas | int | `2` | Replica count for Longhorn UI. | +| longhornUI.tolerations | list | `[]` | Toleration for Longhorn UI on nodes allowed to run Longhorn components. | + +### Ingress Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| ingress.annotations | string | `nil` | Ingress annotations in the form of key-value pairs. | +| ingress.enabled | bool | `false` | Setting that allows Longhorn to generate ingress records for the Longhorn UI service. | +| ingress.host | string | `"sslip.io"` | Hostname of the Layer 7 load balancer. | +| ingress.ingressClassName | string | `nil` | IngressClass resource that contains ingress configuration, including the name of the Ingress controller. ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases. | +| ingress.path | string | `"/"` | Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}. | +| ingress.secrets | string | `nil` | Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses. | +| ingress.secureBackends | bool | `false` | Setting that allows you to enable secure connections to the Longhorn UI service via port 443. | +| ingress.tls | bool | `false` | Setting that allows you to enable TLS on ingress records. | +| ingress.tlsSecret | string | `"longhorn.local-tls"` | TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records. | + +### Private Registry Settings + +You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs). + +| Key | Description | +|-----|-------------| +| privateRegistry.createSecret | Setting that allows you to create a private registry secret. | +| privateRegistry.registryPasswd | Password for authenticating with a private registry. | +| privateRegistry.registrySecret | Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name. | +| privateRegistry.registryUrl | URL of a private registry. When unspecified, Longhorn uses the default system registry. | +| privateRegistry.registryUser | User account used for authenticating with a private registry. | + +### OS/Kubernetes Distro Settings + +#### OpenShift Settings + +For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md). + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| openshift.enabled | bool | `false` | Setting that allows Longhorn to integrate with OpenShift. | +| openshift.ui.port | int | `443` | Port for accessing the OpenShift web console. | +| openshift.ui.proxy | int | `8443` | Port for proxy that provides access to the OpenShift web console. | +| openshift.ui.route | string | `"longhorn-ui"` | Route for connections between Longhorn and the OpenShift web console. | + +### Other Settings + +| Key | Default | Description | +|-----|---------|-------------| +| annotations | `{}` | Annotation for the Longhorn Manager DaemonSet pods. This setting is optional. | +| enableGoCoverDir | `false` | Setting that allows Longhorn to generate code coverage profiles. 
| +| enablePSP | `false` | Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled. | +| metrics.serviceMonitor.enabled | `false` | Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components. | +| namespaceOverride | `""` | Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`. | +| preUpgradeChecker.jobEnabled | `true` | Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions. | +| preUpgradeChecker.upgradeVersionCheck | `true` | Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled. | + +### System Default Settings + +During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs). + +| Key | Description | +|-----|-------------| +| defaultSettings.allowCollectingLonghornUsageMetrics | Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses. | +| defaultSettings.allowEmptyDiskSelectorVolume | Setting that allows scheduling of empty disk selector volumes to any disk. | +| defaultSettings.allowEmptyNodeSelectorVolume | Setting that allows scheduling of empty node selector volumes to any node. | +| defaultSettings.allowRecurringJobWhileVolumeDetached | Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run. | +| defaultSettings.allowVolumeCreationWithDegradedAvailability | Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation. | +| defaultSettings.autoCleanupRecurringJobBackupSnapshot | Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job. | +| defaultSettings.autoCleanupSystemGeneratedSnapshot | Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed. | +| defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly | Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting. | +| defaultSettings.autoSalvage | Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). 
Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default. | +| defaultSettings.backingImageCleanupWaitInterval | Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it. | +| defaultSettings.backingImageRecoveryWaitInterval | Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown". | +| defaultSettings.backupCompressionMethod | Setting that allows you to specify a backup compression method. | +| defaultSettings.backupConcurrentLimit | Maximum number of worker threads that can concurrently run for each backup. | +| defaultSettings.backupTarget | Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE") | +| defaultSettings.backupTargetCredentialSecret | Name of the Kubernetes secret associated with the backup target. | +| defaultSettings.backupstorePollInterval | Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled. | +| defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit | Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version. | +| defaultSettings.concurrentReplicaRebuildPerNodeLimit | Maximum number of replicas that can be concurrently rebuilt on each node. | +| defaultSettings.concurrentVolumeBackupRestorePerNodeLimit | Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled. | +| defaultSettings.createDefaultDiskLabeledNodes | Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster. | +| defaultSettings.defaultDataLocality | Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume. | +| defaultSettings.defaultDataPath | Default path for storing data on a host. The default value is "/var/lib/longhorn/". | +| defaultSettings.defaultLonghornStaticStorageClass | Default Longhorn StorageClass. "storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static". | +| defaultSettings.defaultReplicaCount | Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3". | +| defaultSettings.deletingConfirmationFlag | Flag that prevents accidental uninstallation of Longhorn. | +| defaultSettings.detachManuallyAttachedVolumesWhenCordoned | Setting that allows automatic detaching of manually-attached volumes when a node is cordoned. 
| +| defaultSettings.disableRevisionCounter | Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI. | +| defaultSettings.disableSchedulingOnCordonedNode | Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default. | +| defaultSettings.disableSnapshotPurge | Setting that temporarily prevents all attempts to purge volume snapshots. | +| defaultSettings.engineReplicaTimeout | Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8". | +| defaultSettings.failedBackupTTL | Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled. | +| defaultSettings.fastReplicaRebuildEnabled | Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check". | +| defaultSettings.guaranteedInstanceManagerCPU | Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12". | +| defaultSettings.kubernetesClusterAutoscalerEnabled | Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. | +| defaultSettings.logLevel | Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace") | +| defaultSettings.nodeDownPodDeletionPolicy | Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed. | +| defaultSettings.nodeDrainPolicy | Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained. | +| defaultSettings.offlineReplicaRebuilding | Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine. | +| defaultSettings.orphanAutoDeletion | Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up. | +| defaultSettings.priorityClass | PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. | +| defaultSettings.recurringFailedJobsHistoryLimit | Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained. | +| defaultSettings.recurringJobMaxRetention | Maximum number of snapshots or backups to be retained. | +| defaultSettings.recurringSuccessfulJobsHistoryLimit | Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained. 
| +| defaultSettings.removeSnapshotsDuringFilesystemTrim | Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files. | +| defaultSettings.replicaAutoBalance | Setting that automatically rebalances replicas when an available node is discovered. | +| defaultSettings.replicaDiskSoftAntiAffinity | Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default. | +| defaultSettings.replicaFileSyncHttpClientTimeout | Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed. | +| defaultSettings.replicaReplenishmentWaitInterval | Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume. | +| defaultSettings.replicaSoftAntiAffinity | Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default. | +| defaultSettings.replicaZoneSoftAntiAffinity | Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=" in the Kubernetes node object. | +| defaultSettings.restoreConcurrentLimit | Maximum number of worker threads that can concurrently run for each restore operation. | +| defaultSettings.restoreVolumeRecurringJobs | Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration. | +| defaultSettings.snapshotDataIntegrity | Setting that allows you to enable and disable snapshot hashing and data integrity checks. | +| defaultSettings.snapshotDataIntegrityCronjob | Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format. | +| defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation | Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance. | +| defaultSettings.snapshotMaxCount | Maximum snapshot count for a volume. The value should be between 2 to 250 | +| defaultSettings.storageMinimalAvailablePercentage | Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25". | +| defaultSettings.storageNetwork | Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network. | +| defaultSettings.storageOverProvisioningPercentage | Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100". | +| defaultSettings.storageReservedPercentageForDefaultDisk | Percentage of disk space that is not allocated to the default disk on each new Longhorn node. | +| defaultSettings.supportBundleFailedHistoryLimit | Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles. | +| defaultSettings.systemManagedComponentsNodeSelector | Node selector for system-managed Longhorn components. 
| +| defaultSettings.systemManagedPodsImagePullPolicy | Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart. | +| defaultSettings.taintToleration | Taint or toleration for system-managed Longhorn components. | +| defaultSettings.upgradeChecker | Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default | +| defaultSettings.v1DataEngine | Setting that allows you to enable the V1 Data Engine. | +| defaultSettings.v2DataEngine | Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. | +| defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU | Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". | +| defaultSettings.v2DataEngineHugepageLimit | Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine. | +| defaultSettings.v2DataEngineLogLevel | Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. | +| defaultSettings.v2DataEngineLogFlags | Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. | + +--- +Please see [link](https://github.com/longhorn/longhorn) for more information. diff --git a/deploy/charts/harvester/dependency_charts/longhorn/README.md.gotmpl b/deploy/charts/harvester/dependency_charts/longhorn/README.md.gotmpl new file mode 100644 index 00000000000..f199a2ae138 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/README.md.gotmpl @@ -0,0 +1,239 @@ +# Longhorn Chart + +> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only. + +> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +> **Note**: Use Helm 3 when installing and upgrading Longhorn. Helm 2 is [no longer supported](https://helm.sh/blog/helm-2-becomes-unsupported/). + +## Source Code + +Longhorn is 100% open source software. Project source code is spread across a number of repos: + +1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine +2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager +3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager +4. Backing Image Manager -- Backing image file lifecycle management. https://github.com/longhorn/backing-image-manager +5. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager +6. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui + +## Prerequisites + +1. A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.) +2. Kubernetes >= v1.21 +3. Make sure `bash`, `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. +4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. 
For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. + +## Upgrading to Kubernetes v1.25+ + +Starting in Kubernetes v1.25, [Pod Security Policies](https://kubernetes.io/docs/concepts/security/pod-security-policy/) have been removed from the Kubernetes API. + +As a result, **before upgrading to Kubernetes v1.25** (or on a fresh install in a Kubernetes v1.25+ cluster), users are expected to perform an in-place upgrade of this chart with `enablePSP` set to `false` if it has been previously set to `true`. + +> **Note:** +> If you upgrade your cluster to Kubernetes v1.25+ before removing PSPs via a `helm upgrade` (even if you manually clean up resources), **it will leave the Helm release in a broken state within the cluster such that further Helm operations will not work (`helm uninstall`, `helm upgrade`, etc.).** +> +> If your charts get stuck in this state, you may have to clean up your Helm release secrets. +Upon setting `enablePSP` to false, the chart will remove any PSP resources deployed on its behalf from the cluster. This is the default setting for this chart. + +As a replacement for PSPs, [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) should be used. Please consult the Longhorn docs for more details on how to configure your chart release namespaces to work with the new Pod Security Admission and apply Pod Security Standards. + +## Installation + +1. Add Longhorn chart repository. +``` +helm repo add longhorn https://charts.longhorn.io +``` + +2. Update local Longhorn chart information from chart repository. +``` +helm repo update +``` + +3. Use the following commands to create the `longhorn-system` namespace first, then install the Longhorn chart. + +``` +kubectl create namespace longhorn-system +helm install longhorn longhorn/longhorn --namespace longhorn-system +``` + +## Uninstallation + +``` +kubectl -n longhorn-system patch -p '{"value": "true"}' --type=merge lhs deleting-confirmation-flag +helm uninstall longhorn -n longhorn-system +kubectl delete namespace longhorn-system +``` + +## Values + +The `values.yaml` contains items used to tweak a deployment of this chart. 
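+
+For example, an individual value can be overridden on the command line at install or upgrade time, as sketched below (the key and the replica count of 2 are purely illustrative; see the tables that follow for the available keys and their defaults):
+
+```
+helm install longhorn longhorn/longhorn --namespace longhorn-system \
+  --set persistence.defaultClassReplicaCount=2
+```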
+ +### Cattle Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "global" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Network Policies + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "networkPolicies" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Image Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "image" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Service Settings + +| Key | Description | +|-----|-------------| +{{- range .Values }} + {{- if (and (hasPrefix "service" .Key) (not (contains "Account" .Key))) }} +| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### StorageClass Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "persistence" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### CSI Settings + +| Key | Description | +|-----|-------------| +{{- range .Values }} + {{- if hasPrefix "csi" .Key }} +| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Longhorn Manager Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Manager. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "longhornManager" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Longhorn Driver Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn Driver. 
+ +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "longhornDriver" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Longhorn UI Settings + +Longhorn consists of user-deployed components (for example, Longhorn Manager, Longhorn Driver, and Longhorn UI) and system-managed components (for example, Instance Manager, Backing Image Manager, Share Manager, CSI Driver, and Engine Image). The following settings only apply to Longhorn UI. + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "longhornUI" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Ingress Settings + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "ingress" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Private Registry Settings + +You can install Longhorn in an air-gapped environment with a private registry. For more information, see the **Air Gap Installation** section of the [documentation](https://longhorn.io/docs). + +| Key | Description | +|-----|-------------| +{{- range .Values }} + {{- if hasPrefix "privateRegistry" .Key }} +| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### OS/Kubernetes Distro Settings + +#### OpenShift Settings + +For more details, see the [ocp-readme](https://github.com/longhorn/longhorn/blob/master/chart/ocp-readme.md). + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +{{- range .Values }} + {{- if hasPrefix "openshift" .Key }} +| {{ .Key }} | {{ .Type }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### Other Settings + +| Key | Default | Description | +|-----|---------|-------------| +{{- range .Values }} + {{- if not (or (hasPrefix "defaultSettings" .Key) + (hasPrefix "networkPolicies" .Key) + (hasPrefix "image" .Key) + (hasPrefix "service" .Key) + (hasPrefix "persistence" .Key) + (hasPrefix "csi" .Key) + (hasPrefix "longhornManager" .Key) + (hasPrefix "longhornDriver" .Key) + (hasPrefix "longhornUI" .Key) + (hasPrefix "privateRegistry" .Key) + (hasPrefix "ingress" .Key) + (hasPrefix "openshift" .Key) + (hasPrefix "global" .Key)) }} +| {{ .Key }} | {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +### System Default Settings + +During installation, you can either allow Longhorn to use the default system settings or use specific flags to modify the default values. After installation, you can modify the settings using the Longhorn UI. For more information, see the **Settings Reference** section of the [documentation](https://longhorn.io/docs). 
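+
+For example, a system default setting from the table below can be supplied as a flag at install time, as in this sketch (the chosen key is illustrative; any `defaultSettings.*` key from the table can be set the same way):
+
+```
+helm install longhorn longhorn/longhorn --namespace longhorn-system \
+  --set defaultSettings.createDefaultDiskLabeledNodes=true
+```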
+ +| Key | Description | +|-----|-------------| +{{- range .Values }} + {{- if hasPrefix "defaultSettings" .Key }} +| {{ .Key }} | {{ if .Description }}{{ .Description }}{{ else }}{{ .AutoDescription }}{{ end }} | + {{- end }} +{{- end }} + +--- +Please see [link](https://github.com/longhorn/longhorn) for more information. diff --git a/deploy/charts/harvester/dependency_charts/longhorn/app-readme.md b/deploy/charts/harvester/dependency_charts/longhorn/app-readme.md new file mode 100644 index 00000000000..cb23135cad9 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/app-readme.md @@ -0,0 +1,11 @@ +# Longhorn + +Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn. + +Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups! + +**Important**: Please install Longhorn chart in `longhorn-system` namespace only. + +**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md) diff --git a/deploy/charts/harvester/dependency_charts/longhorn/ocp-readme.md b/deploy/charts/harvester/dependency_charts/longhorn/ocp-readme.md new file mode 100644 index 00000000000..f7638775b09 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/ocp-readme.md @@ -0,0 +1,177 @@ +# OpenShift / OKD Extra Configuration Steps + +- [OpenShift / OKD Extra Configuration Steps](#openshift--okd-extra-configuration-steps) + - [Notes](#notes) + - [Known Issues](#known-issues) + - [Preparing Nodes (Optional)](#preparing-nodes-optional) + - [Default /var/lib/longhorn setup](#default-varliblonghorn-setup) + - [Separate /var/mnt/longhorn setup](#separate-varmntlonghorn-setup) + - [Create Filesystem](#create-filesystem) + - [Mounting Disk On Boot](#mounting-disk-on-boot) + - [Label and Annotate Nodes](#label-and-annotate-nodes) + - [Example values.yaml](#example-valuesyaml) + - [Installation](#installation) + - [Refs](#refs) + +## Notes + +Main changes and tasks for OCP are: + +- On OCP / OKD, the Operating System is Managed by the Cluster +- OCP Imposes [Security Context Constraints](https://docs.openshift.com/container-platform/4.11/authentication/managing-security-context-constraints.html) + - This requires everything to run with the least privilege possible. For the moment every component has been given access to run as higher privilege. + - Something to circle back on is network polices and which components can have their privileges reduced without impacting functionality. + - The UI probably can be for example. +- openshift/oauth-proxy for authentication to the Longhorn Ui + - **⚠️** Currently Scoped to Authenticated Users that can delete a longhorn settings object. + - **⚠️** Since the UI it self is not protected, network policies will need to be created to prevent namespace <--> namespace communication against the pod or service object directly. + - Anyone with access to the UI Deployment can remove the route restriction. 
(Namespace Scoped Admin) +- Option to use separate disk in /var/mnt/longhorn & MachineConfig file to mount /var/mnt/longhorn +- Adding finalizers for mount propagation + +## Known Issues + +- General Feature/Issue Thread + - [[FEATURE] Deploying Longhorn on OKD/Openshift](https://github.com/longhorn/longhorn/issues/1831) +- 4.10 / 1.23: + - 4.10.0-0.okd-2022-03-07-131213 to 4.10.0-0.okd-2022-07-09-073606 + - Tested, No Known Issues +- 4.11 / 1.24: + - 4.11.0-0.okd-2022-07-27-052000 to 4.11.0-0.okd-2022-11-19-050030 + - Tested, No Known Issues + - 4.11.0-0.okd-2022-12-02-145640, 4.11.0-0.okd-2023-01-14-152430: + - Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988) + - [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772) +- 4.12 / 1.25: + - 4.12.0-0.okd-2022-12-05-210624 to 4.12.0-0.okd-2023-01-20-101927 + - Tested, No Known Issues + - 4.12.0-0.okd-2023-01-21-055900 to 4.12.0-0.okd-2023-02-18-033438: + - Workaround: [[BUG] Volumes Stuck in Attach/Detach Loop](https://github.com/longhorn/longhorn/issues/4988) + - [MachineConfig Patch](https://github.com/longhorn/longhorn/issues/4988#issuecomment-1345676772) + - 4.12.0-0.okd-2023-03-05-022504 - 4.12.0-0.okd-2023-04-16-041331: + - Tested, No Known Issues +- 4.13 / 1.26: + - 4.13.0-0.okd-2023-05-03-001308 - 4.13.0-0.okd-2023-08-18-135805: + - Tested, No Known Issues +- 4.14 / 1.27: + - 4.14.0-0.okd-2023-08-12-022330 - 4.14.0-0.okd-2023-10-28-073550: + - Tested, No Known Issues + +## Preparing Nodes (Optional) + +Only required if you require additional customizations, such as storage-less nodes, or secondary disks. + +### Default /var/lib/longhorn setup + +Label each node for storage with: + +```bash +oc get nodes --no-headers | awk '{print $1}' + +export NODE="worker-0" +oc label node "${NODE}" node.longhorn.io/create-default-disk=true +``` + +### Separate /var/mnt/longhorn setup + +#### Create Filesystem + +On the storage nodes create a filesystem with the label longhorn: + +```bash +oc get nodes --no-headers | awk '{print $1}' + +export NODE="worker-0" +oc debug node/${NODE} -t -- chroot /host bash + +# Validate Target Drive is Present +lsblk + +export DRIVE="sdb" #vdb +sudo mkfs.ext4 -L longhorn /dev/${DRIVE} +``` + +> ⚠️ Note: If you add New Nodes After the below Machine Config is applied, you will need to also reboot the node. + +#### Mounting Disk On Boot + +The Secondary Drive needs to be mounted on every boot. 
Save the Concents and Apply the MachineConfig with `oc apply -f`: + +> ⚠️ This will trigger an machine config profile update and reboot all worker nodes on the cluster + +```yaml +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: worker + name: 71-mount-storage-worker +spec: + config: + ignition: + version: 3.2.0 + systemd: + units: + - name: var-mnt-longhorn.mount + enabled: true + contents: | + [Unit] + Before=local-fs.target + [Mount] + Where=/var/mnt/longhorn + What=/dev/disk/by-label/longhorn + Options=rw,relatime,discard + [Install] + WantedBy=local-fs.target +``` + +#### Label and Annotate Nodes + +Label and annotate storage nodes like this: + +```bash +oc get nodes --no-headers | awk '{print $1}' + +export NODE="worker-0" +oc annotate node ${NODE} --overwrite node.longhorn.io/default-disks-config='[{"path":"/var/mnt/longhorn","allowScheduling":true}]' +oc label node ${NODE} node.longhorn.io/create-default-disk=config +``` + +## Example values.yaml + +Minimum Adjustments Required + +```yaml +openshift: + oauthProxy: + repository: quay.io/openshift/origin-oauth-proxy + tag: 4.14 # Use Your OCP/OKD 4.X Version, Current Stable is 4.14 + +# defaultSettings: # Preparing nodes (Optional) + # createDefaultDiskLabeledNodes: true + +openshift: + enabled: true + ui: + route: "longhorn-ui" + port: 443 + proxy: 8443 +``` + +## Installation + +```bash +# helm template ./chart/ --namespace longhorn-system --values ./chart/values.yaml --no-hooks > longhorn.yaml # Local Testing +helm template longhorn --namespace longhorn-system --values values.yaml --no-hooks > longhorn.yaml +oc create namespace longhorn-system -o yaml --dry-run=client | oc apply -f - +oc apply -f longhorn.yaml -n longhorn-system +``` + +## Refs + +- +- +- okd 4.5: +- okd 4.6: +- oauth-proxy: +- diff --git a/deploy/charts/harvester/dependency_charts/longhorn/questions.yaml b/deploy/charts/harvester/dependency_charts/longhorn/questions.yaml new file mode 100644 index 00000000000..605b1cd27d2 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/questions.yaml @@ -0,0 +1,927 @@ +categories: +- storage +namespace: longhorn-system +questions: +- variable: image.defaultImage + default: "true" + description: "Use default Longhorn images" + label: Use Default Images + type: boolean + show_subquestion_if: false + group: "Longhorn Images" + subquestions: + - variable: image.longhorn.manager.repository + default: longhornio/longhorn-manager + description: "Repository for the Longhorn Manager image." + type: string + label: Longhorn Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.manager.tag + default: master-head + description: "Tag for the Longhorn Manager image." + type: string + label: Longhorn Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.repository + default: longhornio/longhorn-engine + description: "Repository for the Longhorn Engine image." + type: string + label: Longhorn Engine Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.tag + default: master-head + description: "Tag for the Longhorn Engine image." + type: string + label: Longhorn Engine Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.repository + default: longhornio/longhorn-ui + description: "Repository for the Longhorn UI image." 
+ type: string + label: Longhorn UI Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.tag + default: master-head + description: "Tag for the Longhorn UI image." + type: string + label: Longhorn UI Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.repository + default: longhornio/longhorn-instance-manager + description: "Repository for the Longhorn Instance Manager image." + type: string + label: Longhorn Instance Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.tag + default: master-head + description: "Tag for the Longhorn Instance Manager image." + type: string + label: Longhorn Instance Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.repository + default: longhornio/longhorn-share-manager + description: "Repository for the Longhorn Share Manager image." + type: string + label: Longhorn Share Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.tag + default: master-head + description: "Tag for the Longhorn Share Manager image." + type: string + label: Longhorn Share Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.repository + default: longhornio/backing-image-manager + description: "Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn Backing Image Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.tag + default: master-head + description: "Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn Backing Image Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.supportBundleKit.repository + default: longhornio/support-bundle-kit + description: "Repository for the Longhorn Support Bundle Manager image." + type: string + label: Longhorn Support Bundle Kit Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.supportBundleKit.tag + default: v0.0.36 + description: "Tag for the Longhorn Support Bundle Manager image." + type: string + label: Longhorn Support Bundle Kit Image Tag + group: "Longhorn Images Settings" + - variable: image.csi.attacher.repository + default: longhornio/csi-attacher + description: "Repository for the CSI attacher image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Attacher Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.attacher.tag + default: v4.4.2 + description: "Tag for the CSI attacher image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Attacher Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.repository + default: longhornio/csi-provisioner + description: "Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Provisioner Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.tag + default: v3.6.2 + description: "Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value." 
+ type: string + label: Longhorn CSI Provisioner Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.repository + default: longhornio/csi-node-driver-registrar + description: "Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Node Driver Registrar Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.tag + default: v2.9.2 + description: "Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Node Driver Registrar Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.repository + default: longhornio/csi-resizer + description: "Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Resizer Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.tag + default: v1.9.2 + description: "Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Resizer Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.repository + default: longhornio/csi-snapshotter + description: "Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Snapshotter Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.tag + default: v6.3.2 + description: "Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Driver Snapshotter Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.livenessProbe.repository + default: longhornio/livenessprobe + description: "Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Liveness Probe Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.livenessProbe.tag + default: v2.12.0 + description: "Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value." + type: string + label: Longhorn CSI Liveness Probe Image Tag + group: "Longhorn CSI Driver Images" +- variable: privateRegistry.registryUrl + label: Private registry URL + description: "URL of a private registry. When unspecified, Longhorn uses the default system registry." + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.registrySecret + label: Private registry secret name + description: "Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name." + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.createSecret + default: "true" + description: "Setting that allows you to create a private registry secret." + type: boolean + group: "Private Registry Settings" + label: Create Secret for Private Registry Settings + show_subquestion_if: true + subquestions: + - variable: privateRegistry.registryUser + label: Private registry user + description: "User account used for authenticating with a private registry." 
+ type: string + default: "" + - variable: privateRegistry.registryPasswd + label: Private registry password + description: "Password for authenticating with a private registry." + type: password + default: "" +- variable: longhorn.default_setting + default: "false" + description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn." + label: "Customize Default Settings" + type: boolean + show_subquestion_if: true + group: "Longhorn Default Settings" + subquestions: + - variable: csi.kubeletRootDir + default: + description: "kubelet root directory. When unspecified, Longhorn uses the default value." + type: string + label: Kubelet Root Directory + group: "Longhorn CSI Driver Settings" + - variable: csi.attacherReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Attacher replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.provisionerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Provisioner replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.resizerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Resizer replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.snapshotterReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value (\"3\")." + label: Longhorn CSI Snapshotter replica count + group: "Longhorn CSI Driver Settings" + - variable: defaultSettings.backupTarget + label: Backup Target + description: "Endpoint used to access the backupstore. (Options: \"NFS\", \"CIFS\", \"AWS\", \"GCP\", \"AZURE\")" + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.backupTargetCredentialSecret + label: Backup Target Credential Secret + description: "Name of the Kubernetes secret associated with the backup target." + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.allowRecurringJobWhileVolumeDetached + label: Allow Recurring Job While Volume Is Detached + description: 'Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.snapshotMaxCount + label: Snapshot Maximum Count + description: 'Maximum snapshot count for a volume. The value should be between 2 to 250.' + group: "Longhorn Default Settings" + type: int + min: 2 + max: 250 + default: 250 + - variable: defaultSettings.createDefaultDiskLabeledNodes + label: Create Default Disk on Labeled Nodes + description: 'Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.defaultDataPath + label: Default Data Path + description: 'Default path for storing data on a host. The default value is "/var/lib/longhorn/".' + group: "Longhorn Default Settings" + type: string + default: "/var/lib/longhorn/" + - variable: defaultSettings.defaultDataLocality + label: Default Data Locality + description: 'Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume.' + group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" + - variable: defaultSettings.replicaSoftAntiAffinity + label: Replica Node Level Soft Anti-Affinity + description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default, false.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.replicaAutoBalance + label: Replica Auto Balance + description: 'Enable this setting automatically re-balances replicas when discovered an available node.' + group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "least-effort" + - "best-effort" + default: "disabled" + - variable: defaultSettings.storageOverProvisioningPercentage + label: Storage Over Provisioning Percentage + description: "Percentage of storage that can be allocated relative to hard drive capacity. The default value is 100." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 100 + - variable: defaultSettings.storageMinimalAvailablePercentage + label: Storage Minimal Available Percentage + description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default, 25." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 100 + default: 25 + - variable: defaultSettings.storageReservedPercentageForDefaultDisk + label: Storage Reserved Percentage For Default Disk + description: "The reserved percentage specifies the percentage of disk space that will not be allocated to the default disk on each new Longhorn node." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 100 + default: 30 + - variable: defaultSettings.upgradeChecker + label: Enable Upgrade Checker + description: 'Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.defaultReplicaCount + label: Default Replica Count + description: "Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is \"3\"." + group: "Longhorn Default Settings" + type: int + min: 1 + max: 20 + default: 3 + - variable: defaultSettings.defaultLonghornStaticStorageClass + label: Default Longhorn Static StorageClass Name + description: "Default Longhorn StorageClass. \"storageClassName\" is assigned to PVs and PVCs that are created for an existing Longhorn volume. \"storageClassName\" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is \"longhorn-static\"." 
+ group: "Longhorn Default Settings" + type: string + default: "longhorn-static" + - variable: defaultSettings.backupstorePollInterval + label: Backupstore Poll Interval + description: "Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is \"300\". When the value is \"0\", polling is disabled." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 300 + - variable: defaultSettings.failedBackupTTL + label: Failed Backup Time to Live + description: "Number of minutes that Longhorn keeps a failed backup resource. When the value is \"0\", automatic deletion is disabled." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1440 + - variable: defaultSettings.restoreVolumeRecurringJobs + label: Restore Volume Recurring Jobs + description: "Restore recurring jobs from the backup volume on the backup target and create recurring jobs if not exist during a backup restoration." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.recurringSuccessfulJobsHistoryLimit + label: Cronjob Successful Jobs History Limit + description: "This setting specifies how many successful backup or snapshot job histories should be retained. History will not be retained if the value is 0." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.recurringFailedJobsHistoryLimit + label: Cronjob Failed Jobs History Limit + description: 'Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained.' + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.recurringJobMaxRetention + label: Maximum Retention Number for Recurring Job + description: "Maximum number of snapshots or backups to be retained." + group: "Longhorn Default Settings" + type: int + default: 100 + - variable: defaultSettings.supportBundleFailedHistoryLimit + label: SupportBundle Failed History Limit + description: "This setting specifies how many failed support bundles can exist in the cluster. Set this value to **0** to have Longhorn automatically purge all failed support bundles." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 1 + - variable: defaultSettings.autoSalvage + label: Automatic salvage + description: "Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly + label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly + description: 'Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.disableSchedulingOnCordonedNode + label: Disable Scheduling On Cordoned Node + description: "Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.replicaZoneSoftAntiAffinity + label: Replica Zone Level Soft Anti-Affinity + description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. Notice that Longhorn relies on label `topology.kubernetes.io/zone=` in the Kubernetes node object to identify the zone. By, default true." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.replicaDiskSoftAntiAffinity + label: Replica Disk Level Soft Anti-Affinity + description: 'Allow scheduling on disks with existing healthy replicas of the same volume. By default, true.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.allowEmptyNodeSelectorVolume + label: Allow Empty Node Selector Volume + description: "Setting that allows scheduling of empty node selector volumes to any node." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.allowEmptyDiskSelectorVolume + label: Allow Empty Disk Selector Volume + description: "Setting that allows scheduling of empty disk selector volumes to any disk." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.nodeDownPodDeletionPolicy + label: Pod Deletion Policy When Node is Down + description: "Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed." + group: "Longhorn Default Settings" + type: enum + options: + - "do-nothing" + - "delete-statefulset-pod" + - "delete-deployment-pod" + - "delete-both-statefulset-and-deployment-pod" + default: "do-nothing" + - variable: defaultSettings.nodeDrainPolicy + label: Node Drain Policy + description: "Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained." + group: "Longhorn Default Settings" + type: enum + options: + - "block-for-eviction" + - "block-for-eviction-if-contains-last-replica" + - "block-if-contains-last-replica" + - "allow-if-replica-is-stopped" + - "always-allow" + default: "block-if-contains-last-replica" + - variable: defaultSettings.detachManuallyAttachedVolumesWhenCordoned + label: Detach Manually Attached Volumes When Cordoned + description: "Setting that allows automatic detaching of manually-attached volumes when a node is cordoned." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.priorityClass + label: Priority Class + description: "PriorityClass for system-managed Longhorn components. This setting can help prevent Longhorn components from being evicted under Node Pressure. Longhorn system contains user deployed components (E.g, Longhorn manager, Longhorn driver, Longhorn UI) and system managed components (E.g, instance manager, engine image, CSI driver, etc.) Note that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. 
WARNING: DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: string + default: "longhorn-critical" + - variable: defaultSettings.replicaReplenishmentWaitInterval + label: Replica Replenishment Wait Interval + description: "The interval in seconds determines how long Longhorn will at least wait to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 600 + - variable: defaultSettings.concurrentReplicaRebuildPerNodeLimit + label: Concurrent Replica Rebuild Per Node Limit + description: "Maximum number of replicas that can be concurrently rebuilt on each node. + WARNING: + - The old setting \"Disable Replica Rebuild\" is replaced by this setting. + - Different from relying on replica starting delay to limit the concurrent rebuilding, if the rebuilding is disabled, replica object replenishment will be directly skipped. + - When the value is 0, the eviction and data locality feature won't work. But this shouldn't have any impact to any current replica rebuild and backup restore." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 5 + - variable: defaultSettings.concurrentVolumeBackupRestorePerNodeLimit + label: Concurrent Volume Backup Restore Per Node Limit + description: "Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is \"0\", restoration of volumes using a backup is disabled." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 5 + - variable: defaultSettings.disableRevisionCounter + label: Disable Revision Counter + description: "Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the \"volume-head-xxx.img\" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.systemManagedPodsImagePullPolicy + label: System Managed Pod Image Pull Policy + description: "Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart." + group: "Longhorn Default Settings" + type: enum + options: + - "if-not-present" + - "always" + - "never" + default: "if-not-present" + - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability + label: Allow Volume Creation with Degraded Availability + description: "Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot + label: Automatically Cleanup System Generated Snapshot + description: "Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupRecurringJobBackupSnapshot + label: Automatically Cleanup Recurring Job Backup Snapshot + description: "Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job." 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit + label: Concurrent Automatic Engine Upgrade Per Node Limit + description: "Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is \"0\", Longhorn does not automatically upgrade volume engines to the new default engine image version." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 0 + - variable: defaultSettings.backingImageCleanupWaitInterval + label: Backing Image Cleanup Wait Interval + description: "Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 60 + - variable: defaultSettings.backingImageRecoveryWaitInterval + label: Backing Image Recovery Wait Interval + description: "Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to \"failed\" or \"unknown\"." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 300 + - variable: defaultSettings.guaranteedInstanceManagerCPU + label: Guaranteed Instance Manager CPU + description: "Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is \"12\". + WARNING: + - Value 0 means removing the CPU requests from spec of instance manager pods. + - Considering the possible number of new instance manager pods in a further system upgrade, this integer value ranges from 0 to 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. + - This global setting will be ignored for a node if the field \"InstanceManagerCPURequest\" on the node is set. + - After this setting is changed, all instance manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 + - variable: defaultSettings.logLevel + label: Log Level + description: 'Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace")' + group: "Longhorn Default Settings" + type: string + default: "Info" + - variable: defaultSettings.disableSnapshotPurge + label: Disable Snapshot Purge + description: "Setting that temporarily prevents all attempts to purge volume snapshots." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.kubernetesClusterAutoscalerEnabled + label: Kubernetes Cluster Autoscaler Enabled (Experimental) + description: "Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. + WARNING: + - Replica rebuilding could be expensive because nodes with reusable replicas could get removed by the Kubernetes Cluster Autoscaler." 
+ group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.orphanAutoDeletion + label: Orphaned Data Cleanup + description: "Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.storageNetwork + label: Storage Network + description: "Longhorn uses the storage network for in-cluster data traffic. Leave this blank to use the Kubernetes cluster network. + WARNING: + - This setting should change after detaching all Longhorn volumes, as some of the Longhorn system component pods will get recreated to apply the setting. Longhorn will try to block this setting update when there are attached volumes." + group: "Longhorn Default Settings" + type: string + default: +- variable: defaultSettings.deletingConfirmationFlag + label: Deleting Confirmation Flag + description: "Flag that prevents accidental uninstallation of Longhorn." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.engineReplicaTimeout + label: Timeout between Engine and Replica + description: "Timeout between the Longhorn Engine and replicas. Specify a value between \"8\" and \"30\" seconds. The default value is \"8\"." + group: "Longhorn Default Settings" + type: int + default: "8" +- variable: defaultSettings.snapshotDataIntegrity + label: Snapshot Data Integrity + description: "This setting allows users to enable or disable snapshot hashing and data integrity checking." + group: "Longhorn Default Settings" + type: string + default: "disabled" +- variable: defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation + label: Immediate Snapshot Data Integrity Check After Creating a Snapshot + description: "Hashing snapshot disk files impacts the performance of the system. The immediate snapshot hashing and checking can be disabled to minimize the impact after creating a snapshot." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.snapshotDataIntegrityCronjob + label: Snapshot Data Integrity Check CronJob + description: "Unix-cron string format. The setting specifies when Longhorn checks the data integrity of snapshot disk files." + group: "Longhorn Default Settings" + type: string + default: "0 0 */7 * *" +- variable: defaultSettings.removeSnapshotsDuringFilesystemTrim + label: Remove Snapshots During Filesystem Trim + description: "This setting allows Longhorn filesystem trim feature to automatically mark the latest snapshot and its ancestors as removed and stops at the snapshot containing multiple children." + group: "Longhorn Default Settings" + type: boolean + default: "false" +- variable: defaultSettings.fastReplicaRebuildEnabled + label: Fast Replica Rebuild Enabled + description: "Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to \"enable\" or \"fast-check\"." + group: "Longhorn Default Settings" + type: boolean + default: false +- variable: defaultSettings.replicaFileSyncHttpClientTimeout + label: Timeout of HTTP Client to Replica File Sync Server + description: "In seconds. The setting specifies the HTTP client timeout to the file sync server." 
+ group: "Longhorn Default Settings" + type: int + default: "30" +- variable: defaultSettings.backupCompressionMethod + label: Backup Compression Method + description: "Setting that allows you to specify a backup compression method." + group: "Longhorn Default Settings" + type: string + default: "lz4" +- variable: defaultSettings.backupConcurrentLimit + label: Backup Concurrent Limit Per Backup + description: "Maximum number of worker threads that can concurrently run for each backup." + group: "Longhorn Default Settings" + type: int + min: 1 + default: 2 +- variable: defaultSettings.restoreConcurrentLimit + label: Restore Concurrent Limit Per Backup + description: "This setting controls how many worker threads per restore concurrently." + group: "Longhorn Default Settings" + type: int + min: 1 + default: 2 +- variable: defaultSettings.allowCollectingLonghornUsageMetrics + label: Allow Collecting Longhorn Usage Metrics + description: "Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses." + group: "Longhorn Default Settings" + type: boolean + default: true +- variable: defaultSettings.v1DataEngine + label: V1 Data Engine + description: "Setting that allows you to enable the V1 Data Engine." + group: "Longhorn V1 Data Engine Settings" + type: boolean + default: true +- variable: defaultSettings.v2DataEngine + label: V2 Data Engine + description: "Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. + WARNING: + - DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES. Longhorn will block this setting update when there are attached volumes. + - When the V2 Data Engine is enabled, each instance-manager pod utilizes 1 CPU core. This high CPU usage is attributed to the spdk_tgt process running within each instance-manager pod. The spdk_tgt process is responsible for handling input/output (IO) operations and requires intensive polling. As a result, it consumes 100% of a dedicated CPU core to efficiently manage and process the IO requests, ensuring optimal performance and responsiveness for storage operations." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: boolean + default: false +- variable: defaultSettings.v2DataEngineHugepageLimit + label: V2 Data Engine + description: "This allows users to configure maximum huge page size (in MiB) for the V2 Data Engine." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: int + default: "2048" +- variable: defaultSettings.v2DataEngineLogLevel + label: V2 Data Engine + description: "Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: enum + options: + - "Disabled" + - "Error" + - "Warn" + - "Notice" + - "Info" + - "Debug" + default: "Notice" +- variable: defaultSettings.v2DataEngineLogFlags + label: V2 Data Engine + description: "Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine." 
+ group: "Longhorn V2 Data Engine (Preview Feature) Settings" + type: string + default: +- variable: defaultSettings.offlineReplicaRebuilding + label: Offline Replica Rebuilding + description: "Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine." + group: "Longhorn V2 Data Engine (Preview Feature) Settings" + required: true + type: enum + options: + - "enabled" + - "disabled" + default: "enabled" +- variable: persistence.defaultClass + default: "true" + description: "Setting that allows you to specify the default Longhorn StorageClass." + label: Default Storage Class + group: "Longhorn Storage Class Settings" + required: true + type: boolean +- variable: persistence.reclaimPolicy + label: Storage Class Retain Policy + description: "Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: \"Retain\", \"Delete\")" + group: "Longhorn Storage Class Settings" + required: true + type: enum + options: + - "Delete" + - "Retain" + default: "Delete" +- variable: persistence.defaultClassReplicaCount + description: "Replica count of the default Longhorn StorageClass." + label: Default Storage Class Replica Count + group: "Longhorn Storage Class Settings" + type: int + min: 1 + max: 10 + default: 3 +- variable: persistence.defaultDataLocality + description: "Data locality of the default Longhorn StorageClass. (Options: \"disabled\", \"best-effort\")" + label: Default Storage Class Data Locality + group: "Longhorn Storage Class Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" +- variable: persistence.recurringJobSelector.enable + description: "Setting that allows you to enable the recurring job selector for a Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Recurring Job Selector + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.recurringJobSelector.jobList + description: 'Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`)' + label: Storage Class Recurring Job Selector List + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.defaultNodeSelector.enable + description: "Setting that allows you to enable the node selector for the default Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Node Selector + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.defaultNodeSelector.selector + label: Storage Class Node Selector + description: 'Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast")' + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.backingImage.enable + description: "Setting that allows you to use a backing image in a Longhorn StorageClass." + group: "Longhorn Storage Class Settings" + label: Default Storage Class Backing Image + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.backingImage.name + description: 'Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image.' 
+ label: Storage Class Backing Image Name + group: "Longhorn Storage Class Settings" + type: string + default: + - variable: persistence.backingImage.expectedChecksum + description: 'Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. + WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - It is not recommended to set this field if the data source type is \"export-from-volume\".' + label: Storage Class Backing Image Expected SHA512 Checksum + group: "Longhorn Storage Class Settings" + type: string + default: + - variable: persistence.backingImage.dataSourceType + description: 'Data source type of a backing image used in a Longhorn StorageClass. If the backing image exists in the cluster, Longhorn uses this setting to verify the image. If the backing image does not exist, Longhorn creates one using the specified data source type. + WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - As for backing image creation with data source type \"upload\", it is recommended to do it via UI rather than StorageClass here. Uploading requires file data sending to the Longhorn backend after the object creation, which is complicated if you want to handle it manually.' + label: Storage Class Backing Image Data Source Type + group: "Longhorn Storage Class Settings" + type: enum + options: + - "" + - "download" + - "upload" + - "export-from-volume" + default: "" + - variable: persistence.backingImage.dataSourceParameters + description: "Data source parameters of a backing image used in a Longhorn StorageClass. You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) + WARNING: + - If the backing image name is not specified, setting this field is meaningless. + - Be careful of the quotes here." + label: Storage Class Backing Image Data Source Parameters + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: persistence.removeSnapshotsDuringFilesystemTrim + description: "Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. (Options: \"ignored\", \"enabled\", \"disabled\")" + label: Default Storage Class Remove Snapshots During Filesystem Trim + group: "Longhorn Storage Class Settings" + type: enum + options: + - "ignored" + - "enabled" + - "disabled" + default: "ignored" +- variable: ingress.enabled + default: "false" + description: "Expose app using Layer 7 Load Balancer - ingress" + type: boolean + group: "Services and Load Balancing" + label: Expose app using Layer 7 Load Balancer + show_subquestion_if: true + subquestions: + - variable: ingress.host + default: "xip.io" + description: "Hostname of the Layer 7 load balancer." + type: hostname + required: true + label: Layer 7 Load Balancer Hostname + - variable: ingress.path + default: "/" + description: "Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}." + type: string + required: true + label: Ingress Path +- variable: service.ui.type + default: "Rancher-Proxy" + description: "Service type for Longhorn UI. 
(Options: \"ClusterIP\", \"NodePort\", \"LoadBalancer\", \"Rancher-Proxy\")" + type: enum + options: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" + - "Rancher-Proxy" + label: Longhorn UI Service + show_if: "ingress.enabled=false" + group: "Services and Load Balancing" + show_subquestion_if: "NodePort" + subquestions: + - variable: service.ui.nodePort + default: "" + description: "NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767." + type: int + min: 30000 + max: 32767 + show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer" + label: UI Service NodePort number +- variable: enablePSP + default: "false" + description: "Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled." + label: Pod Security Policy + type: boolean + group: "Other Settings" +- variable: global.cattle.windowsCluster.enabled + default: "false" + description: "Setting that allows Longhorn to run on a Rancher Windows cluster." + label: Rancher Windows Cluster + type: boolean + group: "Other Settings" +- variable: networkPolicies.enabled + description: "Setting that allows you to enable network policies that control access to Longhorn pods. + Warning: The Rancher Proxy will not work if this feature is enabled and a custom NetworkPolicy must be added." + group: "Other Settings" + label: Network Policies + default: "false" + type: boolean + subquestions: + - variable: networkPolicies.type + label: Network Policies for Ingress + description: "Distribution that determines the policy for allowing access for an ingress. (Options: \"k3s\", \"rke2\", \"rke1\")" + show_if: "networkPolicies.enabled=true&&ingress.enabled=true" + type: enum + default: "rke2" + options: + - "rke1" + - "rke2" + - "k3s" + - variable: defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU + label: Guaranteed Instance Manager CPU for V2 Data Engine + description: 'Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". + WARNING: + - Specifying a value of 0 disables CPU requests for instance manager pods. You must specify an integer between 1000 and 8000. + - This is a global setting. Modifying the value triggers an automatic restart of the instance manager pods. Do not modify the value while volumes are still attached." + group: "Longhorn Default Settings' + type: int + min: 1000 + max: 8000 + default: 1250 diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/NOTES.txt b/deploy/charts/harvester/dependency_charts/longhorn/templates/NOTES.txt new file mode 100644 index 00000000000..cca7cd77b94 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/NOTES.txt @@ -0,0 +1,5 @@ +Longhorn is now installed on the cluster! + +Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized. 
+ +Visit our documentation at https://longhorn.io/docs/ diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/_helpers.tpl b/deploy/charts/harvester/dependency_charts/longhorn/templates/_helpers.tpl new file mode 100644 index 00000000000..3fbc2ac02f8 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/_helpers.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "longhorn.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "longhorn.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "longhorn.managerIP" -}} +{{- $fullname := (include "longhorn.fullname" .) -}} +{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "secret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }} +{{- end }} + +{{- /* +longhorn.labels generates the standard Helm labels. +*/ -}} +{{- define "longhorn.labels" -}} +app.kubernetes.io/name: {{ template "longhorn.name" . }} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} + + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{- define "registry_url" -}} +{{- if .Values.privateRegistry.registryUrl -}} +{{- printf "%s/" .Values.privateRegistry.registryUrl -}} +{{- else -}} +{{ include "system_default_registry" . }} +{{- end -}} +{{- end -}} + +{{- /* + define the longhorn release namespace +*/ -}} +{{- define "release_namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrole.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrole.yaml new file mode 100644 index 00000000000..f6e069f004a --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrole.yaml @@ -0,0 +1,77 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-role + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: [""] + resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps", "serviceaccounts"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] +- apiGroups: ["apps"] + resources: ["daemonsets", "statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["*"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets", "podsecuritypolicies"] + verbs: ["*"] +- apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["watch", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments", "volumeattachments/status", "csinodes", "csidrivers"] + verbs: ["*"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] + verbs: ["*"] +- apiGroups: ["longhorn.io"] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + {{- if .Values.openshift.enabled }} + "engineimages/finalizers", "nodes/finalizers", "instancemanagers/finalizers", + {{- end }} + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", + "backingimagemanagers", "backingimagemanagers/status", "backingimagedatasources", "backingimagedatasources/status", + "backuptargets", "backuptargets/status", "backupvolumes", "backupvolumes/status", "backups", "backups/status", + "recurringjobs", "recurringjobs/status", "orphans", "orphans/status", "snapshots", "snapshots/status", + "supportbundles", "supportbundles/status", "systembackups", "systembackups/status", "systemrestores", "systemrestores/status", + "volumeattachments", "volumeattachments/status", "backupbackingimages", "backupbackingimages/status"] + verbs: ["*"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] +- apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["list", "watch"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "create", "patch", "delete"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterrolebindings", "clusterroles"] + verbs: ["*"] +{{- if .Values.openshift.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-ocp-privileged-role + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +rules: +- apiGroups: ["security.openshift.io"] + resources: ["securitycontextconstraints"] + resourceNames: ["anyuid", "privileged"] + verbs: ["use"] +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrolebinding.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrolebinding.yaml new file mode 100644 index 00000000000..2e34f014ce4 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/clusterrolebinding.yaml @@ -0,0 +1,49 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-bind + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-support-bundle + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: longhorn-support-bundle + namespace: {{ include "release_namespace" . }} +{{- if .Values.openshift.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: longhorn-ocp-privileged-bind + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-ocp-privileged-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: longhorn-ui-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: default # supportbundle-agent-support-bundle uses default sa + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/crds.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/crds.yaml new file mode 100644 index 00000000000..892217f321b --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/crds.yaml @@ -0,0 +1,3954 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: backingimagedatasources.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageDataSource + listKind: BackingImageDataSourceList + plural: backingimagedatasources + shortNames: + - lhbids + singular: backingimagedatasource + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the pod used to provision the backing image file from source + jsonPath: .status.currentState + name: State + type: string + - description: The data source type + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The node the backing image file will be prepared on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the backing image file will be prepared on + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImageDataSource is where Longhorn stores backing image data source object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The system generated UUID of the provisioned backing image file + jsonPath: .spec.uuid + name: UUID + type: string + - description: The current state of the pod used to provision the backing image file from source + jsonPath: .status.currentState + name: State + type: string + - description: The data source type + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The backing image file size + jsonPath: .status.size + name: Size + type: string + - description: The node the backing image file will be prepared on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the backing image file will be prepared on + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImageDataSource is where Longhorn stores backing image data source object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackingImageDataSourceSpec defines the desired state of the Longhorn backing image data source + properties: + checksum: + type: string + diskPath: + type: string + diskUUID: + type: string + fileTransferred: + type: boolean + nodeID: + type: string + parameters: + additionalProperties: + type: string + type: object + sourceType: + enum: + - download + - upload + - export-from-volume + - restore + type: string + uuid: + type: string + type: object + status: + description: BackingImageDataSourceStatus defines the observed state of the Longhorn backing image data source + properties: + checksum: + type: string + currentState: + type: string + ip: + type: string + message: + type: string + ownerID: + type: string + progress: + type: integer + runningParameters: + additionalProperties: + type: string + nullable: true + type: object + size: + format: int64 + type: integer + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: backingimagemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageManager + listKind: BackingImageManagerList + plural: backingimagemanagers + shortNames: + - lhbim + singular: backingimagemanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the manager + jsonPath: .status.currentState + name: State + type: string + - description: The image the manager pod will use + jsonPath: .spec.image + name: Image + type: string + - description: The node the manager is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - description: The disk path the manager is using + jsonPath: .spec.diskPath + name: DiskPath + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImageManager is where Longhorn stores backing image manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The current state of the manager + jsonPath: .status.currentState + name: State + type: string + - description: The image the manager pod will use + jsonPath: .spec.image + name: Image + type: string + - description: The node the manager is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + name: DiskUUID + type: string + - description: The disk path the manager is using + jsonPath: .spec.diskPath + name: DiskPath + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImageManager is where Longhorn stores backing image manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackingImageManagerSpec defines the desired state of the Longhorn backing image manager + properties: + backingImages: + additionalProperties: + type: string + type: object + diskPath: + type: string + diskUUID: + type: string + image: + type: string + nodeID: + type: string + type: object + status: + description: BackingImageManagerStatus defines the observed state of the Longhorn backing image manager + properties: + apiMinVersion: + type: integer + apiVersion: + type: integer + backingImageFileMap: + additionalProperties: + properties: + currentChecksum: + type: string + message: + type: string + name: + type: string + progress: + type: integer + senderManagerAddress: + type: string + sendingReference: + type: integer + size: + format: int64 + type: integer + state: + type: string + uuid: + type: string + virtualSize: + format: int64 + type: integer + type: object + nullable: true + type: object + currentState: + type: string + ip: + type: string + ownerID: + type: string + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: backingimages.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . 
}} + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: BackingImage + listKind: BackingImageList + plural: backingimages + shortNames: + - lhbi + singular: backingimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backing image name + jsonPath: .spec.image + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: BackingImage is where Longhorn stores backing image object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The system generated UUID + jsonPath: .status.uuid + name: UUID + type: string + - description: The source of the backing image file data + jsonPath: .spec.sourceType + name: SourceType + type: string + - description: The backing image file size in each disk + jsonPath: .status.size + name: Size + type: string + - description: The virtual size of the image (may be larger than file size) + jsonPath: .status.virtualSize + name: VirtualSize + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: BackingImage is where Longhorn stores backing image object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackingImageSpec defines the desired state of the Longhorn backing image + properties: + checksum: + type: string + disks: + additionalProperties: + type: string + type: object + sourceParameters: + additionalProperties: + type: string + type: object + sourceType: + enum: + - download + - upload + - export-from-volume + - restore + type: string + type: object + status: + description: BackingImageStatus defines the observed state of the Longhorn backing image status + properties: + checksum: + type: string + diskFileStatusMap: + additionalProperties: + properties: + lastStateTransitionTime: + type: string + message: + type: string + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + diskLastRefAtMap: + additionalProperties: + type: string + nullable: true + type: object + ownerID: + type: string + size: + format: int64 + type: integer + uuid: + type: string + virtualSize: + description: Virtual size of image, which may be larger than physical size. Will be zero until known (e.g. while a backing image is uploading) + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: + longhorn-manager: "" + name: backupbackingimages.longhorn.io +spec: + group: longhorn.io + names: + kind: BackupBackingImage + listKind: BackupBackingImageList + plural: backupbackingimages + shortNames: + - lhbbi + singular: backupbackingimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backing image name + jsonPath: .status.backingImage + name: BackingImage + type: string + - description: The backing image size + jsonPath: .status.size + name: Size + type: string + - description: The backing image backup upload finished time + jsonPath: .status.backupCreatedAt + name: BackupCreatedAt + type: string + - description: The backing image backup state + jsonPath: .status.state + name: State + type: string + - description: The last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupBackingImage is where Longhorn stores backing image backup object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
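As a reading aid for the BackingImage CRD spec just defined (sourceType, sourceParameters, checksum): a user-created backing image that pulls its data over HTTP would use the v1beta2 fields shown above. This is only a sketch; the resource name, download URL, and the `url` parameter key are illustrative assumptions, not values taken from this chart.

  apiVersion: longhorn.io/v1beta2
  kind: BackingImage
  metadata:
    name: example-backing-image          # illustrative name
    namespace: longhorn-system           # namespace where Longhorn runs
  spec:
    sourceType: download                 # one of: download, upload, export-from-volume, restore
    sourceParameters:
      url: https://example.com/disk.qcow2   # assumed parameter key for the download source type
    checksum: ""                         # optional expected checksum; status records the computed one
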
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupBackingImageSpec defines the desired state of the Longhorn backing image backup + properties: + labels: + additionalProperties: + type: string + description: The labels of backing image backup. + type: object + syncRequestedAt: + description: The time to request run sync the remote backing image backup. + format: date-time + nullable: true + type: string + userCreated: + description: Is this CR created by user through API or UI. Required + type: boolean + required: + - userCreated + type: object + status: + description: BackupBackingImageStatus defines the observed state of the Longhorn backing image backup + properties: + backingImage: + description: The backing image name. + type: string + backupCreatedAt: + description: The backing image backup upload finished time. + type: string + checksum: + description: The checksum of the backing image. + type: string + compressionMethod: + description: Compression method + type: string + error: + description: The error message when taking the backing image backup. + type: string + labels: + additionalProperties: + type: string + description: The labels of backing image backup. + nullable: true + type: object + lastSyncedAt: + description: The last time that the backing image backup was synced with the remote backup target. + format: date-time + nullable: true + type: string + managerAddress: + description: The address of the backing image manager that runs backing image backup. + type: string + messages: + additionalProperties: + type: string + description: The error messages when listing or inspecting backing image backup. + nullable: true + type: object + ownerID: + description: The node ID on which the controller is responsible to reconcile this CR. + type: string + progress: + description: The backing image backup progress. + type: integer + size: + description: The backing image size. + format: int64 + type: integer + state: + description: The backing image backup creation state. Can be "", "InProgress", "Completed", "Error", "Unknown". + type: string + url: + description: The backing image backup URL. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: backups.longhorn.io +spec: + group: longhorn.io + names: + kind: Backup + listKind: BackupList + plural: backups + shortNames: + - lhb + singular: backup + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The snapshot name + jsonPath: .status.snapshotName + name: SnapshotName + type: string + - description: The snapshot size + jsonPath: .status.size + name: SnapshotSize + type: string + - description: The snapshot creation time + jsonPath: .status.snapshotCreatedAt + name: SnapshotCreatedAt + type: string + - description: The backup state + jsonPath: .status.state + name: State + type: string + - description: The backup last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: Backup is where Longhorn stores backup object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The snapshot name + jsonPath: .status.snapshotName + name: SnapshotName + type: string + - description: The snapshot size + jsonPath: .status.size + name: SnapshotSize + type: string + - description: The snapshot creation time + jsonPath: .status.snapshotCreatedAt + name: SnapshotCreatedAt + type: string + - description: The backup state + jsonPath: .status.state + name: State + type: string + - description: The backup last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: Backup is where Longhorn stores backup object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupSpec defines the desired state of the Longhorn backup + properties: + labels: + additionalProperties: + type: string + description: The labels of snapshot backup. + type: object + snapshotName: + description: The snapshot name. + type: string + syncRequestedAt: + description: The time to request run sync the remote backup. 
+ format: date-time + nullable: true + type: string + type: object + status: + description: BackupStatus defines the observed state of the Longhorn backup + properties: + backupCreatedAt: + description: The snapshot backup upload finished time. + type: string + compressionMethod: + description: Compression method + type: string + error: + description: The error message when taking the snapshot backup. + type: string + labels: + additionalProperties: + type: string + description: The labels of snapshot backup. + nullable: true + type: object + lastSyncedAt: + description: The last time that the backup was synced with the remote backup target. + format: date-time + nullable: true + type: string + messages: + additionalProperties: + type: string + description: The error messages when calling longhorn engine on listing or inspecting backups. + nullable: true + type: object + ownerID: + description: The node ID on which the controller is responsible to reconcile this backup CR. + type: string + progress: + description: The snapshot backup progress. + type: integer + replicaAddress: + description: The address of the replica that runs snapshot backup. + type: string + size: + description: The snapshot size. + type: string + snapshotCreatedAt: + description: The snapshot creation time. + type: string + snapshotName: + description: The snapshot name. + type: string + state: + description: The backup creation state. Can be "", "InProgress", "Completed", "Error", "Unknown". + type: string + url: + description: The snapshot backup URL. + type: string + volumeBackingImageName: + description: The volume's backing image name. + type: string + volumeCreated: + description: The volume creation time. + type: string + volumeName: + description: The volume name. + type: string + volumeSize: + description: The volume size. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: backuptargets.longhorn.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: BackupTarget + listKind: BackupTargetList + plural: backuptargets + shortNames: + - lhbt + singular: backuptarget + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backup target URL + jsonPath: .spec.backupTargetURL + name: URL + type: string + - description: The backup target credential secret + jsonPath: .spec.credentialSecret + name: Credential + type: string + - description: The backup target poll interval + jsonPath: .spec.pollInterval + name: LastBackupAt + type: string + - description: Indicate whether the backup target is available or not + jsonPath: .status.available + name: Available + type: boolean + - description: The backup target last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BackupTarget is where Longhorn stores backup target object. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The backup target URL + jsonPath: .spec.backupTargetURL + name: URL + type: string + - description: The backup target credential secret + jsonPath: .spec.credentialSecret + name: Credential + type: string + - description: The backup target poll interval + jsonPath: .spec.pollInterval + name: LastBackupAt + type: string + - description: Indicate whether the backup target is available or not + jsonPath: .status.available + name: Available + type: boolean + - description: The backup target last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupTarget is where Longhorn stores backup target object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupTargetSpec defines the desired state of the Longhorn backup target + properties: + backupTargetURL: + description: The backup target URL. + type: string + credentialSecret: + description: The backup target credential secret. + type: string + pollInterval: + description: The interval that the cluster needs to run sync with the backup target. + type: string + syncRequestedAt: + description: The time to request run sync the remote backup target. + format: date-time + nullable: true + type: string + type: object + status: + description: BackupTargetStatus defines the observed state of the Longhorn backup target + properties: + available: + description: Available indicates if the remote backup target is available or not. + type: boolean + conditions: + description: Records the reason on why the backup target is unavailable. + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. 
+ type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + lastSyncedAt: + description: The last time that the controller synced with the remote backup target. + format: date-time + nullable: true + type: string + ownerID: + description: The node ID on which the controller is responsible to reconcile this backup target CR. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: backupvolumes.longhorn.io +spec: + group: longhorn.io + names: + kind: BackupVolume + listKind: BackupVolumeList + plural: backupvolumes + shortNames: + - lhbv + singular: backupvolume + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The backup volume creation time + jsonPath: .status.createdAt + name: CreatedAt + type: string + - description: The backup volume last backup name + jsonPath: .status.lastBackupName + name: LastBackupName + type: string + - description: The backup volume last backup time + jsonPath: .status.lastBackupAt + name: LastBackupAt + type: string + - description: The backup volume last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: BackupVolume is where Longhorn stores backup volume object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The backup volume creation time + jsonPath: .status.createdAt + name: CreatedAt + type: string + - description: The backup volume last backup name + jsonPath: .status.lastBackupName + name: LastBackupName + type: string + - description: The backup volume last backup time + jsonPath: .status.lastBackupAt + name: LastBackupAt + type: string + - description: The backup volume last synced time + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: BackupVolume is where Longhorn stores backup volume object. 
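To make the BackupTarget schema above concrete: Longhorn normally manages a single target (commonly named `default`) from its backup-target settings rather than expecting hand-written objects, so the sketch below is illustrative only. The S3 URL, secret name, and poll interval are assumptions.

  apiVersion: longhorn.io/v1beta2
  kind: BackupTarget
  metadata:
    name: default                        # the target Longhorn manages from its settings
    namespace: longhorn-system
  spec:
    backupTargetURL: s3://backup-bucket@us-east-1/   # illustrative endpoint
    credentialSecret: aws-secret                     # illustrative credential secret name
    pollInterval: 5m0s                               # how often the cluster syncs with the target
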
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BackupVolumeSpec defines the desired state of the Longhorn backup volume + properties: + syncRequestedAt: + description: The time to request run sync the remote backup volume. + format: date-time + nullable: true + type: string + type: object + status: + description: BackupVolumeStatus defines the observed state of the Longhorn backup volume + properties: + backingImageChecksum: + description: the backing image checksum. + type: string + backingImageName: + description: The backing image name. + type: string + createdAt: + description: The backup volume creation time. + type: string + dataStored: + description: The backup volume block count. + type: string + labels: + additionalProperties: + type: string + description: The backup volume labels. + nullable: true + type: object + lastBackupAt: + description: The latest volume backup time. + type: string + lastBackupName: + description: The latest volume backup name. + type: string + lastModificationTime: + description: The backup volume config last modification time. + format: date-time + nullable: true + type: string + lastSyncedAt: + description: The last time that the backup volume was synced into the cluster. + format: date-time + nullable: true + type: string + messages: + additionalProperties: + type: string + description: The error messages when call longhorn engine on list or inspect backup volumes. + nullable: true + type: object + ownerID: + description: The node ID on which the controller is responsible to reconcile this backup volume CR. + type: string + size: + description: The backup volume size. + type: string + storageClassName: + description: the storage class name of pv/pvc binding with the volume. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: engineimages.longhorn.io +spec: + preserveUnknownFields: false + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . 
}} + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: EngineImage + listKind: EngineImageList + plural: engineimages + shortNames: + - lhei + singular: engineimage + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: State of the engine image + jsonPath: .status.state + name: State + type: string + - description: The Longhorn engine image + jsonPath: .spec.image + name: Image + type: string + - description: Number of resources using the engine image + jsonPath: .status.refCount + name: RefCount + type: integer + - description: The build date of the engine image + jsonPath: .status.buildDate + name: BuildDate + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: EngineImage is where Longhorn stores engine image object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Compatibility of the engine image + jsonPath: .status.incompatible + name: Incompatible + type: boolean + - description: State of the engine image + jsonPath: .status.state + name: State + type: string + - description: The Longhorn engine image + jsonPath: .spec.image + name: Image + type: string + - description: Number of resources using the engine image + jsonPath: .status.refCount + name: RefCount + type: integer + - description: The build date of the engine image + jsonPath: .status.buildDate + name: BuildDate + type: date + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: EngineImage is where Longhorn stores engine image object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EngineImageSpec defines the desired state of the Longhorn engine image + properties: + image: + minLength: 1 + type: string + required: + - image + type: object + status: + description: EngineImageStatus defines the observed state of the Longhorn engine image + properties: + buildDate: + type: string + cliAPIMinVersion: + type: integer + cliAPIVersion: + type: integer + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + controllerAPIMinVersion: + type: integer + controllerAPIVersion: + type: integer + dataFormatMinVersion: + type: integer + dataFormatVersion: + type: integer + gitCommit: + type: string + incompatible: + type: boolean + noRefSince: + type: string + nodeDeploymentMap: + additionalProperties: + type: boolean + nullable: true + type: object + ownerID: + type: string + refCount: + type: integer + state: + type: string + version: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: engines.longhorn.io +spec: + group: longhorn.io + names: + kind: Engine + listKind: EngineList + plural: engines + shortNames: + - lhe + singular: engine + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the engine + jsonPath: .status.currentState + name: State + type: string + - description: The node that the engine is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The instance manager of the engine + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the engine + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Engine is where Longhorn stores engine object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
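For the EngineImage schema just defined, only `spec.image` is required, so a minimal object reduces to the sketch below; the resource name and image tag are illustrative assumptions.

  apiVersion: longhorn.io/v1beta2
  kind: EngineImage
  metadata:
    name: ei-example                     # illustrative name
    namespace: longhorn-system
  spec:
    image: longhornio/longhorn-engine:v1.6.0   # assumed engine image reference
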
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the engine + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The current state of the engine + jsonPath: .status.currentState + name: State + type: string + - description: The node that the engine is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The instance manager of the engine + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the engine + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Engine is where Longhorn stores engine object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: EngineSpec defines the desired state of the Longhorn engine + properties: + active: + type: boolean + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' + type: string + backupVolume: + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + desireState: + type: string + disableFrontend: + type: boolean + engineImage: + description: 'Deprecated: Replaced by field `image`.' 
+ type: string + frontend: + enum: + - blockdev + - iscsi + - nvmf + - "" + type: string + image: + type: string + logRequested: + type: boolean + nodeID: + type: string + replicaAddressMap: + additionalProperties: + type: string + type: object + requestedBackupRestore: + type: string + requestedDataSource: + type: string + revisionCounterDisabled: + type: boolean + salvageRequested: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + unmapMarkSnapChainRemovedEnabled: + type: boolean + upgradedReplicaAddressMap: + additionalProperties: + type: string + type: object + volumeName: + type: string + volumeSize: + format: int64 + type: string + type: object + status: + description: EngineStatus defines the observed state of the Longhorn engine + properties: + backupStatus: + additionalProperties: + properties: + backupURL: + type: string + error: + type: string + progress: + type: integer + replicaAddress: + type: string + snapshotName: + type: string + state: + type: string + type: object + nullable: true + type: object + cloneStatus: + additionalProperties: + properties: + error: + type: string + fromReplicaAddress: + type: string + isCloning: + type: boolean + progress: + type: integer + snapshotName: + type: string + state: + type: string + type: object + nullable: true + type: object + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentReplicaAddressMap: + additionalProperties: + type: string + nullable: true + type: object + currentSize: + format: int64 + type: string + currentState: + type: string + endpoint: + type: string + instanceManagerName: + type: string + ip: + type: string + isExpanding: + type: boolean + lastExpansionError: + type: string + lastExpansionFailedAt: + type: string + lastRestoredBackup: + type: string + logFetched: + type: boolean + ownerID: + type: string + port: + type: integer + purgeStatus: + additionalProperties: + properties: + error: + type: string + isPurging: + type: boolean + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + rebuildStatus: + additionalProperties: + properties: + error: + type: string + fromReplicaAddress: + type: string + isRebuilding: + type: boolean + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + replicaModeMap: + additionalProperties: + type: string + nullable: true + type: object + replicaTransitionTimeMap: + additionalProperties: + type: string + description: ReplicaTransitionTimeMap records the time a replica in ReplicaModeMap transitions from one mode to another (or from not being in the ReplicaModeMap to being in it). This information is sometimes required by other controllers (e.g. the volume controller uses it to determine the correct value for replica.Spec.lastHealthyAt). 
+ type: object + restoreStatus: + additionalProperties: + properties: + backupURL: + type: string + currentRestoringBackup: + type: string + error: + type: string + filename: + type: string + isRestoring: + type: boolean + lastRestored: + type: string + progress: + type: integer + state: + type: string + type: object + nullable: true + type: object + salvageExecuted: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + snapshots: + additionalProperties: + properties: + children: + additionalProperties: + type: boolean + nullable: true + type: object + created: + type: string + labels: + additionalProperties: + type: string + nullable: true + type: object + name: + type: string + parent: + type: string + removed: + type: boolean + size: + type: string + usercreated: + type: boolean + type: object + nullable: true + type: object + snapshotsError: + type: string + started: + type: boolean + storageIP: + type: string + unmapMarkSnapChainRemovedEnabled: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: instancemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: InstanceManager + listKind: InstanceManagerList + plural: instancemanagers + shortNames: + - lhim + singular: instancemanager + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the instance manager + jsonPath: .status.currentState + name: State + type: string + - description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + name: Type + type: string + - description: The node that the instance manager is running on + jsonPath: .spec.nodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: InstanceManager is where Longhorn stores instance manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the instance manager + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The state of the instance manager + jsonPath: .status.currentState + name: State + type: string + - description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + name: Type + type: string + - description: The node that the instance manager is running on + jsonPath: .spec.nodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: InstanceManager is where Longhorn stores instance manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: InstanceManagerSpec defines the desired state of the Longhorn instance manager + properties: + dataEngine: + type: string + image: + type: string + nodeID: + type: string + type: + enum: + - aio + - engine + - replica + type: string + type: object + status: + description: InstanceManagerStatus defines the observed state of the Longhorn instance manager + properties: + apiMinVersion: + type: integer + apiVersion: + type: integer + proxyApiMinVersion: + type: integer + proxyApiVersion: + type: integer + currentState: + type: string + instanceEngines: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' + type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + type: + type: string + type: object + type: object + nullable: true + type: object + instanceReplicas: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' 
+ type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + type: + type: string + type: object + type: object + nullable: true + type: object + instances: + additionalProperties: + properties: + spec: + properties: + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' + type: string + dataEngine: + type: string + name: + type: string + type: object + status: + properties: + conditions: + additionalProperties: + type: boolean + nullable: true + type: object + endpoint: + type: string + errorMsg: + type: string + listen: + type: string + portEnd: + format: int32 + type: integer + portStart: + format: int32 + type: integer + resourceVersion: + format: int64 + type: integer + state: + type: string + type: + type: string + type: object + type: object + nullable: true + description: 'Deprecated: Replaced by InstanceEngines and InstanceReplicas' + type: object + ip: + type: string + ownerID: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: nodes.longhorn.io +spec: + preserveUnknownFields: false + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: Node + listKind: NodeList + plural: nodes + shortNames: + - lhn + singular: node + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Indicate whether the node is ready + jsonPath: .status.conditions['Ready']['status'] + name: Ready + type: string + - description: Indicate whether the user disabled/enabled replica scheduling for the node + jsonPath: .spec.allowScheduling + name: AllowScheduling + type: boolean + - description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions['Schedulable']['status'] + name: Schedulable + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Node is where Longhorn stores Longhorn node object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Indicate whether the node is ready + jsonPath: .status.conditions[?(@.type=='Ready')].status + name: Ready + type: string + - description: Indicate whether the user disabled/enabled replica scheduling for the node + jsonPath: .spec.allowScheduling + name: AllowScheduling + type: boolean + - description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions[?(@.type=='Schedulable')].status + name: Schedulable + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Node is where Longhorn stores Longhorn node object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NodeSpec defines the desired state of the Longhorn node + properties: + allowScheduling: + description: Allow scheduling replicas on the node. + type: boolean + disks: + additionalProperties: + properties: + allowScheduling: + type: boolean + diskType: + enum: + - filesystem + - block + type: string + diskDriver: + enum: + - "" + - auto + - aio + type: string + evictionRequested: + type: boolean + path: + type: string + storageReserved: + format: int64 + type: integer + tags: + items: + type: string + type: array + type: object + type: object + evictionRequested: + type: boolean + instanceManagerCPURequest: + type: integer + name: + type: string + tags: + items: + type: string + type: array + type: object + status: + description: NodeStatus defines the observed state of the Longhorn node + properties: + autoEvicting: + type: boolean + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + diskStatus: + additionalProperties: + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. 
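To illustrate the Node spec defined above: scheduling and per-disk settings (allowScheduling, path, storageReserved, diskType, tags) are the fields operators typically adjust on a Longhorn Node object. The node name, disk key, and values in this sketch are illustrative assumptions.

  apiVersion: longhorn.io/v1beta2
  kind: Node
  metadata:
    name: worker-1                       # matches the Kubernetes node name
    namespace: longhorn-system
  spec:
    allowScheduling: true
    tags: ["fast"]
    disks:
      disk-nvme0:                        # illustrative disk key
        diskType: filesystem             # filesystem or block
        path: /var/lib/longhorn
        allowScheduling: true
        storageReserved: 10737418240     # bytes reserved for non-replica use
        tags: ["nvme"]
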
+ type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + diskType: + type: string + diskUUID: + type: string + diskDriver: + type: string + diskName: + type: string + filesystemType: + type: string + diskPath: + type: string + scheduledReplica: + additionalProperties: + format: int64 + type: integer + nullable: true + type: object + storageAvailable: + format: int64 + type: integer + storageMaximum: + format: int64 + type: integer + storageScheduled: + format: int64 + type: integer + type: object + description: The status of the disks on the node. + nullable: true + type: object + region: + description: The Region of the node. + type: string + snapshotCheckStatus: + description: The status of the snapshot integrity check. + properties: + lastPeriodicCheckedAt: + format: date-time + type: string + type: object + zone: + description: The Zone of the node. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: orphans.longhorn.io +spec: + group: longhorn.io + names: + kind: Orphan + listKind: OrphanList + plural: orphans + shortNames: + - lho + singular: orphan + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The type of the orphan + jsonPath: .spec.orphanType + name: Type + type: string + - description: The node that the orphan is on + jsonPath: .spec.nodeID + name: Node + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: Orphan is where Longhorn stores orphan object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OrphanSpec defines the desired state of the Longhorn orphaned data + properties: + nodeID: + description: The node ID on which the controller is responsible to reconcile this orphan CR. + type: string + orphanType: + description: The type of the orphaned data. Can be "replica". 
+ type: string + parameters: + additionalProperties: + type: string + description: The parameters of the orphaned data + type: object + type: object + status: + description: OrphanStatus defines the observed state of the Longhorn orphaned data + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + ownerID: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: + longhorn-manager: "" + name: recurringjobs.longhorn.io +spec: + group: longhorn.io + names: + kind: RecurringJob + listKind: RecurringJobList + plural: recurringjobs + shortNames: + - lhrj + singular: recurringjob + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Sets groupings to the jobs. When set to "default" group will be added to the volume label when no other job label exist in volume + jsonPath: .spec.groups + name: Groups + type: string + - description: Should be one of "backup" or "snapshot" + jsonPath: .spec.task + name: Task + type: string + - description: The cron expression represents recurring job scheduling + jsonPath: .spec.cron + name: Cron + type: string + - description: The number of snapshots/backups to keep for the volume + jsonPath: .spec.retain + name: Retain + type: integer + - description: The concurrent job to run by each cron job + jsonPath: .spec.concurrency + name: Concurrency + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Specify the labels + jsonPath: .spec.labels + name: Labels + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: RecurringJob is where Longhorn stores recurring job object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: Sets groupings to the jobs. 
When set to "default" group will be added to the volume label when no other job label exist in volume + jsonPath: .spec.groups + name: Groups + type: string + - description: Should be one of "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim" + jsonPath: .spec.task + name: Task + type: string + - description: The cron expression represents recurring job scheduling + jsonPath: .spec.cron + name: Cron + type: string + - description: The number of snapshots/backups to keep for the volume + jsonPath: .spec.retain + name: Retain + type: integer + - description: The concurrent job to run by each cron job + jsonPath: .spec.concurrency + name: Concurrency + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Specify the labels + jsonPath: .spec.labels + name: Labels + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: RecurringJob is where Longhorn stores recurring job object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RecurringJobSpec defines the desired state of the Longhorn recurring job + properties: + concurrency: + description: The concurrency of taking the snapshot/backup. + type: integer + cron: + description: The cron setting. + type: string + groups: + description: The recurring job group. + items: + type: string + type: array + labels: + additionalProperties: + type: string + description: The label of the snapshot/backup. + type: object + name: + description: The recurring job name. + type: string + retain: + description: The retain count of the snapshot/backup. + type: integer + task: + description: The recurring job task. Can be "snapshot", "snapshot-force-create", "snapshot-cleanup", "snapshot-delete", "backup", "backup-force-create" or "filesystem-trim" + enum: + - snapshot + - snapshot-force-create + - snapshot-cleanup + - snapshot-delete + - backup + - backup-force-create + - filesystem-trim + type: string + type: object + status: + description: RecurringJobStatus defines the observed state of the Longhorn recurring job + properties: + ownerID: + description: The owner ID which is responsible to reconcile this recurring job CR. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: replicas.longhorn.io +spec: + group: longhorn.io + names: + kind: Replica + listKind: ReplicaList + plural: replicas + shortNames: + - lhr + singular: replica + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The current state of the replica + jsonPath: .status.currentState + name: State + type: string + - description: The node that the replica is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk that the replica is on + jsonPath: .spec.diskID + name: Disk + type: string + - description: The instance manager of the replica + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the replica + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Replica is where Longhorn stores replica object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the replica + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The current state of the replica + jsonPath: .status.currentState + name: State + type: string + - description: The node that the replica is on + jsonPath: .spec.nodeID + name: Node + type: string + - description: The disk that the replica is on + jsonPath: .spec.diskID + name: Disk + type: string + - description: The instance manager of the replica + jsonPath: .status.instanceManagerName + name: InstanceManager + type: string + - description: The current image of the replica + jsonPath: .status.currentImage + name: Image + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Replica is where Longhorn stores replica object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ReplicaSpec defines the desired state of the Longhorn replica + properties: + active: + type: boolean + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' + type: string + backingImage: + type: string + dataDirectoryName: + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + desireState: + type: string + diskID: + type: string + diskPath: + type: string + engineImage: + description: 'Deprecated: Replaced by field `image`.' + type: string + engineName: + type: string + evictionRequested: + type: boolean + failedAt: + description: FailedAt is set when a running replica fails or when a running engine is unable to use a replica for any reason. FailedAt indicates the time the failure occurred. When FailedAt is set, a replica is likely to have useful (though possibly stale) data. A replica with FailedAt set must be rebuilt from a non-failed replica (or it can be used in a salvage if all replicas are failed). FailedAt is cleared before a rebuild or salvage. FailedAt may be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller acknowledges the change. + type: string + hardNodeAffinity: + type: string + healthyAt: + description: HealthyAt is set the first time a replica becomes read/write in an engine after creation or rebuild. HealthyAt indicates the time the last successful rebuild occurred. When HealthyAt is set, a replica is likely to have useful (though possibly stale) data. HealthyAt is cleared before a rebuild. HealthyAt may be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller acknowledges the change. + type: string + image: + type: string + lastFailedAt: + description: LastFailedAt is always set at the same time as FailedAt. Unlike FailedAt, LastFailedAt is never cleared. LastFailedAt is not a reliable indicator of the state of a replica's data. For example, a replica with LastFailedAt may already be healthy and in use again. However, because it is never cleared, it can be compared to LastHealthyAt to help prevent dangerous replica deletion in some corner cases. LastFailedAt may be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller acknowledges the change. + type: string + lastHealthyAt: + description: LastHealthyAt is set every time a replica becomes read/write in an engine. Unlike HealthyAt, LastHealthyAt is never cleared. LastHealthyAt is not a reliable indicator of the state of a replica's data. For example, a replica with LastHealthyAt set may be in the middle of a rebuild. However, because it is never cleared, it can be compared to LastFailedAt to help prevent dangerous replica deletion in some corner cases. LastHealthyAt may be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller acknowledges the change. 
+ type: string + logRequested: + type: boolean + nodeID: + type: string + rebuildRetryCount: + type: integer + revisionCounterDisabled: + type: boolean + salvageRequested: + type: boolean + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + unmapMarkDiskChainRemovedEnabled: + type: boolean + volumeName: + type: string + volumeSize: + format: int64 + type: string + type: object + status: + description: ReplicaStatus defines the observed state of the Longhorn replica + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentState: + type: string + evictionRequested: + description: 'Deprecated: Replaced by field `spec.evictionRequested`.' + type: boolean + instanceManagerName: + type: string + ip: + type: string + logFetched: + type: boolean + ownerID: + type: string + port: + type: integer + salvageExecuted: + type: boolean + started: + type: boolean + storageIP: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: settings.longhorn.io +spec: + group: longhorn.io + names: + kind: Setting + listKind: SettingList + plural: settings + shortNames: + - lhs + singular: setting + preserveUnknownFields: false + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The value of the setting + jsonPath: .value + name: Value + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Setting is where Longhorn stores setting object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + value: + type: string + required: + - value + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The value of the setting + jsonPath: .value + name: Value + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Setting is where Longhorn stores setting object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + value: + description: The value of the setting. + type: string + required: + - value + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: sharemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: ShareManager + listKind: ShareManagerList + plural: sharemanagers + shortNames: + - lhsm + singular: sharemanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the share manager + jsonPath: .status.state + name: State + type: string + - description: The node that the share manager is owned by + jsonPath: .status.ownerID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: ShareManager is where Longhorn stores share manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The state of the share manager + jsonPath: .status.state + name: State + type: string + - description: The node that the share manager is owned by + jsonPath: .status.ownerID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: ShareManager is where Longhorn stores share manager object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ShareManagerSpec defines the desired state of the Longhorn share manager + properties: + image: + description: Share manager image used for creating a share manager pod + type: string + type: object + status: + description: ShareManagerStatus defines the observed state of the Longhorn share manager + properties: + endpoint: + description: NFS endpoint that can access the mounted filesystem of the volume + type: string + ownerID: + description: The node ID on which the controller is responsible to reconcile this share manager resource + type: string + state: + description: The state of the share manager resource + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: snapshots.longhorn.io +spec: + group: longhorn.io + names: + kind: Snapshot + listKind: SnapshotList + plural: snapshots + shortNames: + - lhsnap + singular: snapshot + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The volume that this snapshot belongs to + jsonPath: .spec.volume + name: Volume + type: string + - description: Timestamp when the point-in-time snapshot was taken + jsonPath: .status.creationTime + name: CreationTime + type: string + - description: Indicates if the snapshot is ready to be used to restore/backup a volume + jsonPath: .status.readyToUse + name: ReadyToUse + type: boolean + - description: Represents the minimum size of volume required to rehydrate from this snapshot + jsonPath: .status.restoreSize + name: RestoreSize + type: string + - description: The actual size of the snapshot + jsonPath: .status.size + name: Size + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Snapshot is the Schema for the snapshots API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SnapshotSpec defines the desired state of Longhorn Snapshot + properties: + createSnapshot: + description: require creating a new snapshot + type: boolean + labels: + additionalProperties: + type: string + description: The labels of snapshot + nullable: true + type: object + volume: + description: the volume that this snapshot belongs to. This field is immutable after creation. Required + type: string + required: + - volume + type: object + status: + description: SnapshotStatus defines the observed state of Longhorn Snapshot + properties: + checksum: + type: string + children: + additionalProperties: + type: boolean + nullable: true + type: object + creationTime: + type: string + error: + type: string + labels: + additionalProperties: + type: string + nullable: true + type: object + markRemoved: + type: boolean + ownerID: + type: string + parent: + type: string + readyToUse: + type: boolean + restoreSize: + format: int64 + type: integer + size: + format: int64 + type: integer + userCreated: + type: boolean + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: supportbundles.longhorn.io +spec: + group: longhorn.io + names: + kind: SupportBundle + listKind: SupportBundleList + plural: supportbundles + shortNames: + - lhbundle + singular: supportbundle + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the support bundle + jsonPath: .status.state + name: State + type: string + - description: The issue URL + jsonPath: .spec.issueURL + name: Issue + type: string + - description: A brief description of the issue + jsonPath: .spec.description + name: Description + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SupportBundle is where Longhorn stores support bundle object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SupportBundleSpec defines the desired state of the Longhorn SupportBundle + properties: + description: + description: A brief description of the issue + type: string + issueURL: + description: The issue URL + nullable: true + type: string + nodeID: + description: The preferred responsible controller node ID. + type: string + required: + - description + type: object + status: + description: SupportBundleStatus defines the observed state of the Longhorn SupportBundle + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + type: array + filename: + type: string + filesize: + format: int64 + type: integer + image: + description: The support bundle manager image + type: string + managerIP: + description: The support bundle manager IP + type: string + ownerID: + description: The current responsible controller node ID + type: string + progress: + type: integer + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: systembackups.longhorn.io +spec: + group: longhorn.io + names: + kind: SystemBackup + listKind: SystemBackupList + plural: systembackups + shortNames: + - lhsb + singular: systembackup + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The system backup Longhorn version + jsonPath: .status.version + name: Version + type: string + - description: The system backup state + jsonPath: .status.state + name: State + type: string + - description: The system backup creation time + jsonPath: .status.createdAt + name: Created + type: string + - description: The last time that the system backup was synced into the cluster + jsonPath: .status.lastSyncedAt + name: LastSyncedAt + type: string + name: v1beta2 + schema: + openAPIV3Schema: + description: SystemBackup is where Longhorn stores system backup object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SystemBackupSpec defines the desired state of the Longhorn SystemBackup + properties: + volumeBackupPolicy: + description: The create volume backup policy Can be "if-not-present", "always" or "disabled" + nullable: true + type: string + type: object + status: + description: SystemBackupStatus defines the observed state of the Longhorn SystemBackup + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + createdAt: + description: The system backup creation time. + format: date-time + type: string + gitCommit: + description: The saved Longhorn manager git commit. + nullable: true + type: string + lastSyncedAt: + description: The last time that the system backup was synced into the cluster. + format: date-time + nullable: true + type: string + managerImage: + description: The saved manager image. + type: string + ownerID: + description: The node ID of the responsible controller to reconcile this SystemBackup. + type: string + state: + description: The system backup state. + type: string + version: + description: The saved Longhorn version. 
+ nullable: true + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: systemrestores.longhorn.io +spec: + group: longhorn.io + names: + kind: SystemRestore + listKind: SystemRestoreList + plural: systemrestores + shortNames: + - lhsr + singular: systemrestore + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The system restore state + jsonPath: .status.state + name: State + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SystemRestore is where Longhorn stores system restore object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SystemRestoreSpec defines the desired state of the Longhorn SystemRestore + properties: + systemBackup: + description: The system backup name in the object store. + type: string + required: + - systemBackup + type: object + status: + description: SystemRestoreStatus defines the observed state of the Longhorn SystemRestore + properties: + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + ownerID: + description: The node ID of the responsible controller to reconcile this SystemRestore. + type: string + sourceURL: + description: The source system backup URL. + type: string + state: + description: The system restore state. + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + longhorn-manager: "" + name: volumes.longhorn.io +spec: + preserveUnknownFields: false + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} + path: /v1/webhook/conversion + port: 9501 + conversionReviewVersions: + - v1beta2 + - v1beta1 + group: longhorn.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + shortNames: + - lhv + singular: volume + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The state of the volume + jsonPath: .status.state + name: State + type: string + - description: The robustness of the volume + jsonPath: .status.robustness + name: Robustness + type: string + - description: The scheduled condition of the volume + jsonPath: .status.conditions['scheduled']['status'] + name: Scheduled + type: string + - description: The size of the volume + jsonPath: .spec.size + name: Size + type: string + - description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Volume is where Longhorn stores volume object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - description: The data engine of the volume + jsonPath: .spec.dataEngine + name: Data Engine + type: string + - description: The state of the volume + jsonPath: .status.state + name: State + type: string + - description: The robustness of the volume + jsonPath: .status.robustness + name: Robustness + type: string + - description: The scheduled condition of the volume + jsonPath: .status.conditions[?(@.type=='Schedulable')].status + name: Scheduled + type: string + - description: The size of the volume + jsonPath: .spec.size + name: Size + type: string + - description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + name: Node + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Volume is where Longhorn stores volume object. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeSpec defines the desired state of the Longhorn volume + properties: + Standby: + type: boolean + accessMode: + enum: + - rwo + - rwx + type: string + backendStoreDriver: + description: 'Deprecated: Replaced by field `dataEngine`.' + type: string + backingImage: + type: string + backupCompressionMethod: + enum: + - none + - lz4 + - gzip + type: string + dataEngine: + enum: + - v1 + - v2 + type: string + dataLocality: + enum: + - disabled + - best-effort + - strict-local + type: string + dataSource: + type: string + disableFrontend: + type: boolean + diskSelector: + items: + type: string + type: array + encrypted: + type: boolean + engineImage: + description: 'Deprecated: Replaced by field `image`.' + type: string + fromBackup: + type: string + frontend: + enum: + - blockdev + - iscsi + - nvmf + - "" + type: string + image: + type: string + lastAttachedBy: + type: string + migratable: + type: boolean + migrationNodeID: + type: string + nodeID: + type: string + nodeSelector: + items: + type: string + type: array + numberOfReplicas: + type: integer + offlineReplicaRebuilding: + description: OfflineReplicaRebuilding is used to determine if the offline replica rebuilding feature is enabled or not + enum: + - ignored + - disabled + - enabled + type: string + replicaAutoBalance: + enum: + - ignored + - disabled + - least-effort + - best-effort + type: string + replicaDiskSoftAntiAffinity: + description: Replica disk soft anti affinity of the volume. Set enabled to allow replicas to be scheduled in the same disk. + enum: + - ignored + - enabled + - disabled + type: string + replicaSoftAntiAffinity: + description: Replica soft anti affinity of the volume. Set enabled to allow replicas to be scheduled on the same node. + enum: + - ignored + - enabled + - disabled + type: string + replicaZoneSoftAntiAffinity: + description: Replica zone soft anti affinity of the volume. Set enabled to allow replicas to be scheduled in the same zone. + enum: + - ignored + - enabled + - disabled + type: string + restoreVolumeRecurringJob: + enum: + - ignored + - enabled + - disabled + type: string + revisionCounterDisabled: + type: boolean + size: + format: int64 + type: string + snapshotDataIntegrity: + enum: + - ignored + - disabled + - enabled + - fast-check + type: string + snapshotMaxCount: + type: integer + snapshotMaxSize: + format: int64 + type: string + staleReplicaTimeout: + type: integer + unmapMarkSnapChainRemoved: + enum: + - ignored + - disabled + - enabled + type: string + type: object + status: + description: VolumeStatus defines the observed state of the Longhorn volume + properties: + actualSize: + format: int64 + type: integer + cloneStatus: + properties: + snapshot: + type: string + sourceVolume: + type: string + state: + type: string + type: object + conditions: + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. 
+ type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + currentImage: + type: string + currentMigrationNodeID: + description: the node that this volume is currently migrating to + type: string + currentNodeID: + type: string + expansionRequired: + type: boolean + frontendDisabled: + type: boolean + isStandby: + type: boolean + kubernetesStatus: + properties: + lastPVCRefAt: + type: string + lastPodRefAt: + type: string + namespace: + description: determine if PVC/Namespace is history or not + type: string + pvName: + type: string + pvStatus: + type: string + pvcName: + type: string + workloadsStatus: + description: determine if Pod/Workload is history or not + items: + properties: + podName: + type: string + podStatus: + type: string + workloadName: + type: string + workloadType: + type: string + type: object + nullable: true + type: array + type: object + lastBackup: + type: string + lastBackupAt: + type: string + lastDegradedAt: + type: string + offlineReplicaRebuildingRequired: + type: boolean + ownerID: + type: string + pendingNodeID: + description: Deprecated. + type: string + remountRequestedAt: + type: string + restoreInitiated: + type: boolean + restoreRequired: + type: boolean + robustness: + type: string + shareEndpoint: + type: string + shareState: + type: string + state: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: null + labels: {{- include "longhorn.labels" . | nindent 4 }} + longhorn-manager: "" + name: volumeattachments.longhorn.io +spec: + group: longhorn.io + names: + kind: VolumeAttachment + listKind: VolumeAttachmentList + plural: volumeattachments + shortNames: + - lhva + singular: volumeattachment + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: VolumeAttachment stores attachment information of a Longhorn volume + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: VolumeAttachmentSpec defines the desired state of Longhorn VolumeAttachment + properties: + attachmentTickets: + additionalProperties: + properties: + generation: + description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + id: + description: The unique ID of this attachment. 
Used to differentiate different attachments of the same volume. + type: string + nodeID: + description: The node that this attachment is requesting + type: string + parameters: + additionalProperties: + type: string + description: Optional additional parameter for this attachment + type: object + type: + type: string + type: object + type: object + volume: + description: The name of Longhorn volume of this VolumeAttachment + type: string + required: + - volume + type: object + status: + description: VolumeAttachmentStatus defines the observed state of Longhorn VolumeAttachment + properties: + attachmentTicketStatuses: + additionalProperties: + properties: + conditions: + description: Record any error when trying to fulfill this attachment + items: + properties: + lastProbeTime: + description: Last time we probed the condition. + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, one-word, CamelCase reason for the condition's last transition. + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. + type: string + type: object + nullable: true + type: array + generation: + description: A sequence number representing a specific generation of the desired state. Populated by the system. Read-only. + format: int64 + type: integer + id: + description: The unique ID of this attachment. Used to differentiate different attachments of the same volume. + type: string + satisfied: + description: Indicate whether this attachment ticket has been satisfied + type: boolean + required: + - conditions + - satisfied + type: object + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/daemonset-sa.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/daemonset-sa.yaml new file mode 100644 index 00000000000..2fa1cbc2434 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/daemonset-sa.yaml @@ -0,0 +1,167 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-manager + name: longhorn-manager + namespace: {{ include "release_namespace" . }} +spec: + selector: + matchLabels: + app: longhorn-manager + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-manager + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: longhorn-manager + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + securityContext: + privileged: true + command: + - longhorn-manager + - -d + {{- if eq .Values.longhornManager.log.format "json" }} + - -j + {{- end }} + - daemon + - --engine-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}" + - --instance-manager-image + - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}" + - --share-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}" + - --backing-image-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}" + - --support-bundle-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.supportBundleKit.repository }}:{{ .Values.image.longhorn.supportBundleKit.tag }}" + - --manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --service-account + - longhorn-service-account + {{- if .Values.preUpgradeChecker.upgradeVersionCheck}} + - --upgrade-version-check + {{- end }} + ports: + - containerPort: 9500 + name: manager + - containerPort: 9501 + name: conversion-wh + - containerPort: 9502 + name: admission-wh + - containerPort: 9503 + name: recov-backend + readinessProbe: + httpGet: + path: /v1/healthz + port: 9501 + scheme: HTTPS + volumeMounts: + - name: dev + mountPath: /host/dev/ + - name: proc + mountPath: /host/proc/ + - name: longhorn + mountPath: /var/lib/longhorn/ + mountPropagation: Bidirectional + - name: longhorn-grpc-tls + mountPath: /tls-files/ + {{- if .Values.enableGoCoverDir }} + - name: go-cover-dir + mountPath: /go-cover-dir/ + {{- end }} + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- if .Values.enableGoCoverDir }} + - name: GOCOVERDIR + value: /go-cover-dir/ + {{- end }} + volumes: + - name: dev + hostPath: + path: /dev/ + - name: proc + hostPath: + path: /proc/ + - name: longhorn + hostPath: + path: /var/lib/longhorn/ + {{- if .Values.enableGoCoverDir }} + - name: go-cover-dir + hostPath: + path: /go-cover-dir/ + type: DirectoryOrCreate + {{- end }} + - name: longhorn-grpc-tls + secret: + secretName: longhorn-grpc-tls + optional: true + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.tolerations }} +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: longhorn-service-account + updateStrategy: + rollingUpdate: + maxUnavailable: "100%" +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- 
include "longhorn.labels" . | nindent 4 }} + app: longhorn-manager + name: longhorn-backend + namespace: {{ include "release_namespace" . }} + {{- if .Values.longhornManager.serviceAnnotations }} + annotations: +{{ toYaml .Values.longhornManager.serviceAnnotations | indent 4 }} + {{- end }} +spec: + type: {{ .Values.service.manager.type }} + selector: + app: longhorn-manager + ports: + - name: manager + port: 9500 + targetPort: manager + {{- if .Values.service.manager.nodePort }} + nodePort: {{ .Values.service.manager.nodePort }} + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/default-setting.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/default-setting.yaml new file mode 100644 index 00000000000..f0c46663787 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/default-setting.yaml @@ -0,0 +1,235 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +data: + default-setting.yaml: |- + {{- if not (kindIs "invalid" .Values.defaultSettings.backupTarget) }} + backup-target: {{ .Values.defaultSettings.backupTarget }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupTargetCredentialSecret) }} + backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowRecurringJobWhileVolumeDetached) }} + allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.createDefaultDiskLabeledNodes) }} + create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataPath) }} + default-data-path: {{ .Values.defaultSettings.defaultDataPath }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaSoftAntiAffinity) }} + replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaAutoBalance) }} + replica-auto-balance: {{ .Values.defaultSettings.replicaAutoBalance }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageOverProvisioningPercentage) }} + storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageMinimalAvailablePercentage) }} + storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageReservedPercentageForDefaultDisk) }} + storage-reserved-percentage-for-default-disk: {{ .Values.defaultSettings.storageReservedPercentageForDefaultDisk }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.upgradeChecker) }} + upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultReplicaCount) }} + default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.defaultDataLocality) }} + default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }} + {{- end }} + {{- if not (kindIs "invalid" 
.Values.defaultSettings.defaultLonghornStaticStorageClass) }} + default-longhorn-static-storage-class: {{ .Values.defaultSettings.defaultLonghornStaticStorageClass }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupstorePollInterval) }} + backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.failedBackupTTL) }} + failed-backup-ttl: {{ .Values.defaultSettings.failedBackupTTL }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.restoreVolumeRecurringJobs) }} + restore-volume-recurring-jobs: {{ .Values.defaultSettings.restoreVolumeRecurringJobs }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit) }} + recurring-successful-jobs-history-limit: {{ .Values.defaultSettings.recurringSuccessfulJobsHistoryLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringJobMaxRetention) }} + recurring-job-max-retention: {{ .Values.defaultSettings.recurringJobMaxRetention }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.recurringFailedJobsHistoryLimit) }} + recurring-failed-jobs-history-limit: {{ .Values.defaultSettings.recurringFailedJobsHistoryLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.supportBundleFailedHistoryLimit) }} + support-bundle-failed-history-limit: {{ .Values.defaultSettings.supportBundleFailedHistoryLimit }} + {{- end }} + {{- if or (not (kindIs "invalid" .Values.defaultSettings.taintToleration)) (.Values.global.cattle.windowsCluster.enabled) }} + taint-toleration: {{ $windowsDefaultSettingTaintToleration := list }}{{ $defaultSettingTaintToleration := list -}} + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}} + {{- $windowsDefaultSettingTaintToleration = .Values.global.cattle.windowsCluster.defaultSetting.taintToleration -}} + {{- end -}} + {{- if not (kindIs "invalid" .Values.defaultSettings.taintToleration) -}} + {{- $defaultSettingTaintToleration = .Values.defaultSettings.taintToleration -}} + {{- end -}} + {{- $taintToleration := list $windowsDefaultSettingTaintToleration $defaultSettingTaintToleration }}{{ join ";" (compact $taintToleration) -}} + {{- end }} + {{- if or (not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector)) (.Values.global.cattle.windowsCluster.enabled) }} + system-managed-components-node-selector: {{ $windowsDefaultSettingNodeSelector := list }}{{ $defaultSettingNodeSelector := list -}} + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}} + {{ $windowsDefaultSettingNodeSelector = .Values.global.cattle.windowsCluster.defaultSetting.systemManagedComponentsNodeSelector -}} + {{- end -}} + {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedComponentsNodeSelector) -}} + {{- $defaultSettingNodeSelector = .Values.defaultSettings.systemManagedComponentsNodeSelector -}} + {{- end -}} + {{- $nodeSelector := list $windowsDefaultSettingNodeSelector $defaultSettingNodeSelector }}{{ join ";" (compact $nodeSelector) -}} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.priorityClass) }} + priority-class: {{ .Values.defaultSettings.priorityClass }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoSalvage) }} + auto-salvage: {{ 
.Values.defaultSettings.autoSalvage }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly) }} + auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableSchedulingOnCordonedNode) }} + disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaZoneSoftAntiAffinity) }} + replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaDiskSoftAntiAffinity) }} + replica-disk-soft-anti-affinity: {{ .Values.defaultSettings.replicaDiskSoftAntiAffinity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.nodeDownPodDeletionPolicy) }} + node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.nodeDrainPolicy) }} + node-drain-policy: {{ .Values.defaultSettings.nodeDrainPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned) }} + detach-manually-attached-volumes-when-cordoned: {{ .Values.defaultSettings.detachManuallyAttachedVolumesWhenCordoned }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaReplenishmentWaitInterval) }} + replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit) }} + concurrent-replica-rebuild-per-node-limit: {{ .Values.defaultSettings.concurrentReplicaRebuildPerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit) }} + concurrent-volume-backup-restore-per-node-limit: {{ .Values.defaultSettings.concurrentVolumeBackupRestorePerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableRevisionCounter) }} + disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.systemManagedPodsImagePullPolicy) }} + system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability) }} + allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot) }} + auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot) }} + auto-cleanup-recurring-job-backup-snapshot: {{ .Values.defaultSettings.autoCleanupRecurringJobBackupSnapshot }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit) }} + concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backingImageCleanupWaitInterval) }} + 
backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backingImageRecoveryWaitInterval) }} + backing-image-recovery-wait-interval: {{ .Values.defaultSettings.backingImageRecoveryWaitInterval }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.guaranteedInstanceManagerCPU) }} + guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.guaranteedInstanceManagerCPU }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.kubernetesClusterAutoscalerEnabled) }} + kubernetes-cluster-autoscaler-enabled: {{ .Values.defaultSettings.kubernetesClusterAutoscalerEnabled }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.orphanAutoDeletion) }} + orphan-auto-deletion: {{ .Values.defaultSettings.orphanAutoDeletion }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.storageNetwork) }} + storage-network: {{ .Values.defaultSettings.storageNetwork }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.deletingConfirmationFlag) }} + deleting-confirmation-flag: {{ .Values.defaultSettings.deletingConfirmationFlag }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.engineReplicaTimeout) }} + engine-replica-timeout: {{ .Values.defaultSettings.engineReplicaTimeout }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrity) }} + snapshot-data-integrity: {{ .Values.defaultSettings.snapshotDataIntegrity }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation) }} + snapshot-data-integrity-immediate-check-after-snapshot-creation: {{ .Values.defaultSettings.snapshotDataIntegrityImmediateCheckAfterSnapshotCreation }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotDataIntegrityCronjob) }} + snapshot-data-integrity-cronjob: {{ .Values.defaultSettings.snapshotDataIntegrityCronjob }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim) }} + remove-snapshots-during-filesystem-trim: {{ .Values.defaultSettings.removeSnapshotsDuringFilesystemTrim }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.fastReplicaRebuildEnabled) }} + fast-replica-rebuild-enabled: {{ .Values.defaultSettings.fastReplicaRebuildEnabled }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.replicaFileSyncHttpClientTimeout) }} + replica-file-sync-http-client-timeout: {{ .Values.defaultSettings.replicaFileSyncHttpClientTimeout }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.logLevel) }} + log-level: {{ .Values.defaultSettings.logLevel }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupCompressionMethod) }} + backup-compression-method: {{ .Values.defaultSettings.backupCompressionMethod }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.backupConcurrentLimit) }} + backup-concurrent-limit: {{ .Values.defaultSettings.backupConcurrentLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.restoreConcurrentLimit) }} + restore-concurrent-limit: {{ .Values.defaultSettings.restoreConcurrentLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v1DataEngine) }} + v1-data-engine: {{ .Values.defaultSettings.v1DataEngine }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngine) }} + v2-data-engine: 
{{ .Values.defaultSettings.v2DataEngine }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineHugepageLimit) }} + v2-data-engine-hugepage-limit: {{ .Values.defaultSettings.v2DataEngineHugepageLimit }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.offlineReplicaRebuilding) }} + offline-replica-rebuilding: {{ .Values.defaultSettings.offlineReplicaRebuilding }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyNodeSelectorVolume) }} + allow-empty-node-selector-volume: {{ .Values.defaultSettings.allowEmptyNodeSelectorVolume }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowEmptyDiskSelectorVolume) }} + allow-empty-disk-selector-volume: {{ .Values.defaultSettings.allowEmptyDiskSelectorVolume }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.allowCollectingLonghornUsageMetrics) }} + allow-collecting-longhorn-usage-metrics: {{ .Values.defaultSettings.allowCollectingLonghornUsageMetrics }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.disableSnapshotPurge) }} + disable-snapshot-purge: {{ .Values.defaultSettings.disableSnapshotPurge }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU) }} + v2-data-engine-guaranteed-instance-manager-cpu: {{ .Values.defaultSettings.v2DataEngineGuaranteedInstanceManagerCPU }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.snapshotMaxCount) }} + snapshot-max-count: {{ .Values.defaultSettings.snapshotMaxCount }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineLogLevel) }} + v2-data-engine-log-level: {{ .Values.defaultSettings.v2DataEngineLogLevel }} + {{- end }} + {{- if not (kindIs "invalid" .Values.defaultSettings.v2DataEngineLogFlags) }} + v2-data-engine-log-flags: {{ .Values.defaultSettings.v2DataEngineLogFlags }} + {{- end }} \ No newline at end of file diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-driver.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-driver.yaml new file mode 100644 index 00000000000..cd2ab3a344d --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-driver.yaml @@ -0,0 +1,132 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-driver-deployer + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-driver-deployer + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-driver-deployer + spec: + initContainers: + - name: wait-longhorn-manager + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] + containers: + - name: longhorn-driver-deployer + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - -d + - deploy-driver + - --manager-image + - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --manager-url + - http://longhorn-backend:9500/v1 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + {{- if .Values.csi.kubeletRootDir }} + - name: KUBELET_ROOT_DIR + value: {{ .Values.csi.kubeletRootDir }} + {{- end }} + {{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }} + - name: CSI_ATTACHER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}" + {{- end }} + {{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }} + - name: CSI_PROVISIONER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}" + {{- end }} + {{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }} + - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}" + {{- end }} + {{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }} + - name: CSI_RESIZER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}" + {{- end }} + {{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }} + - name: CSI_SNAPSHOTTER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}" + {{- end }} + {{- if and .Values.image.csi.livenessProbe.repository .Values.image.csi.livenessProbe.tag }} + - name: CSI_LIVENESS_PROBE_IMAGE + value: "{{ template "registry_url" . 
}}{{ .Values.image.csi.livenessProbe.repository }}:{{ .Values.image.csi.livenessProbe.tag }}" + {{- end }} + {{- if .Values.csi.attacherReplicaCount }} + - name: CSI_ATTACHER_REPLICA_COUNT + value: {{ .Values.csi.attacherReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.provisionerReplicaCount }} + - name: CSI_PROVISIONER_REPLICA_COUNT + value: {{ .Values.csi.provisionerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.resizerReplicaCount }} + - name: CSI_RESIZER_REPLICA_COUNT + value: {{ .Values.csi.resizerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.snapshotterReplicaCount }} + - name: CSI_SNAPSHOTTER_REPLICA_COUNT + value: {{ .Values.csi.snapshotterReplicaCount | quote }} + {{- end }} + {{- if .Values.enableGoCoverDir }} + - name: GOCOVERDIR + value: /go-cover-dir/ + volumeMounts: + - name: go-cover-dir + mountPath: /go-cover-dir/ + {{- end }} + + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornDriver.priorityClass }} + priorityClassName: {{ .Values.longhornDriver.priorityClass | quote }} + {{- end }} + {{- if or .Values.longhornDriver.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornDriver.tolerations }} +{{ toYaml .Values.longhornDriver.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornDriver.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.longhornDriver.nodeSelector }} +{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }} + {{- end }} + {{- end }} + serviceAccountName: longhorn-service-account + securityContext: + runAsUser: 0 + {{- if .Values.enableGoCoverDir }} + volumes: + - name: go-cover-dir + hostPath: + path: /go-cover-dir/ + type: DirectoryOrCreate + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-ui.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-ui.yaml new file mode 100644 index 00000000000..0ee86c79046 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/deployment-ui.yaml @@ -0,0 +1,182 @@ +{{- if .Values.openshift.enabled }} +{{- if .Values.openshift.ui.route }} +# https://github.com/openshift/oauth-proxy/blob/master/contrib/sidecar.yaml +# Create a proxy service account and ensure it will use the route "proxy" +# Create a secure connection to the proxy via a route +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: {{ .Values.openshift.ui.route }} + namespace: {{ include "release_namespace" . }} +spec: + to: + kind: Service + name: longhorn-ui + tls: + termination: reencrypt +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: longhorn-ui + namespace: {{ include "release_namespace" . 
}} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: longhorn-ui-tls +spec: + ports: + - name: longhorn-ui + port: {{ .Values.openshift.ui.port | default 443 }} + targetPort: {{ .Values.openshift.ui.proxy | default 8443 }} + selector: + app: longhorn-ui +--- +{{- end }} +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: longhorn-ui + namespace: {{ include "release_namespace" . }} +spec: + replicas: {{ .Values.longhornUI.replicas }} + selector: + matchLabels: + app: longhorn-ui + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-ui + spec: + serviceAccountName: longhorn-ui-service-account + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - longhorn-ui + topologyKey: kubernetes.io/hostname + containers: + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + - name: oauth-proxy + image: {{ template "registry_url" . }}{{ .Values.image.openshift.oauthProxy.repository }}:{{ .Values.image.openshift.oauthProxy.tag }} + imagePullPolicy: IfNotPresent + ports: + - containerPort: {{ .Values.openshift.ui.proxy | default 8443 }} + name: public + args: + - --https-address=:{{ .Values.openshift.ui.proxy | default 8443 }} + - --provider=openshift + - --openshift-service-account=longhorn-ui-service-account + - --upstream=http://localhost:8000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - --openshift-sar={"namespace":"{{ include "release_namespace" . }}","group":"longhorn.io","resource":"setting","verb":"delete"} + volumeMounts: + - mountPath: /etc/tls/private + name: longhorn-ui-tls + {{- end }} + {{- end }} + - name: longhorn-ui + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + volumeMounts: + - name : nginx-cache + mountPath: /var/cache/nginx/ + - name : nginx-config + mountPath: /var/config/nginx/ + - name: var-run + mountPath: /var/run/ + ports: + - containerPort: 8000 + name: http + env: + - name: LONGHORN_MANAGER_IP + value: "http://longhorn-backend:9500" + - name: LONGHORN_UI_PORT + value: "8000" + volumes: + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + - name: longhorn-ui-tls + secret: + secretName: longhorn-ui-tls + {{- end }} + {{- end }} + - emptyDir: {} + name: nginx-cache + - emptyDir: {} + name: nginx-config + - emptyDir: {} + name: var-run + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornUI.priorityClass }} + priorityClassName: {{ .Values.longhornUI.priorityClass | quote }} + {{- end }} + {{- if or .Values.longhornUI.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornUI.tolerations }} +{{ toYaml .Values.longhornUI.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornUI.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.longhornUI.nodeSelector }} +{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }} + {{- end }} + {{- end }} +--- +kind: Service +apiVersion: v1 +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + {{- if eq .Values.service.ui.type "Rancher-Proxy" }} + kubernetes.io/cluster-service: "true" + {{- end }} + name: longhorn-frontend + namespace: {{ include "release_namespace" . }} +spec: + {{- if eq .Values.service.ui.type "Rancher-Proxy" }} + type: ClusterIP + {{- else }} + type: {{ .Values.service.ui.type }} + {{- end }} + {{- if and .Values.service.ui.loadBalancerIP (eq .Values.service.ui.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.ui.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.ui.type "LoadBalancer") .Values.service.ui.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.ui.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + selector: + app: longhorn-ui + ports: + - name: http + port: 80 + targetPort: http + {{- if .Values.service.ui.nodePort }} + nodePort: {{ .Values.service.ui.nodePort }} + {{- else }} + nodePort: null + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/ingress.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/ingress.yaml new file mode 100644 index 00000000000..9038ff0cc1b --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/ingress.yaml @@ -0,0 +1,37 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + app: longhorn-ingress + annotations: + {{- if .Values.ingress.secureBackends }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ default "" .Values.ingress.path }} + pathType: ImplementationSpecific + backend: + service: + name: longhorn-frontend + port: + number: 80 +{{- if .Values.ingress.tls }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.tlsSecret }} +{{- end }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml new file mode 100644 index 00000000000..7204d63caab --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-data-source-network-policy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: backing-image-data-source + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml new file mode 100644 index 00000000000..119ebf08a1d --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/backing-image-manager-network-policy.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: backing-image-manager + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/instance-manager-networking.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/instance-manager-networking.yaml new file mode 100644 index 00000000000..332aa2c2fef --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/instance-manager-networking.yaml @@ -0,0 +1,27 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: instance-manager + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + longhorn.io/component: instance-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + longhorn.io/component: instance-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-manager + - podSelector: + matchLabels: + longhorn.io/component: backing-image-data-source +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/manager-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/manager-network-policy.yaml new file mode 100644 index 00000000000..6f94029a535 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/manager-network-policy.yaml @@ -0,0 +1,35 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-manager + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + app: longhorn-manager + policyTypes: + - Ingress + ingress: + - from: + - podSelector: + matchLabels: + app: longhorn-manager + - podSelector: + matchLabels: + app: longhorn-ui + - podSelector: + matchLabels: + app: longhorn-csi-plugin + - podSelector: + matchLabels: + longhorn.io/managed-by: longhorn-manager + matchExpressions: + - { key: recurring-job.longhorn.io, operator: Exists } + - podSelector: + matchExpressions: + - { key: longhorn.io/job-task, operator: Exists } + - podSelector: + matchLabels: + app: longhorn-driver-deployer +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml new file mode 100644 index 00000000000..6e34dadfc29 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/recovery-backend-network-policy.yaml @@ -0,0 +1,17 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-recovery-backend + namespace: {{ include "release_namespace" . 
}} +spec: + podSelector: + matchLabels: + app: longhorn-manager + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9503 +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml new file mode 100644 index 00000000000..6f370659801 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/ui-frontend-network-policy.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.networkPolicies.enabled .Values.ingress.enabled (not (eq .Values.networkPolicies.type "")) }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-ui-frontend + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + app: longhorn-ui + policyTypes: + - Ingress + ingress: + - from: + {{- if eq .Values.networkPolicies.type "rke1"}} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ingress-nginx + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + {{- else if eq .Values.networkPolicies.type "rke2" }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: rke2-ingress-nginx + app.kubernetes.io/name: rke2-ingress-nginx + {{- else if eq .Values.networkPolicies.type "k3s" }} + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: kube-system + podSelector: + matchLabels: + app.kubernetes.io/name: traefik + ports: + - port: 8000 + protocol: TCP + - port: 80 + protocol: TCP + {{- end }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/webhook-network-policy.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/webhook-network-policy.yaml new file mode 100644 index 00000000000..3575763d395 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/network-policies/webhook-network-policy.yaml @@ -0,0 +1,33 @@ +{{- if .Values.networkPolicies.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + app: longhorn-manager + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9501 +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: longhorn-admission-webhook + namespace: {{ include "release_namespace" . }} +spec: + podSelector: + matchLabels: + app: longhorn-manager + policyTypes: + - Ingress + ingress: + - ports: + - protocol: TCP + port: 9502 +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/postupgrade-job.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/postupgrade-job.yaml new file mode 100644 index 00000000000..bb25a54d4e7 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/postupgrade-job.yaml @@ -0,0 +1,56 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation + name: longhorn-post-upgrade + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-post-upgrade + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-post-upgrade + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - post-upgrade + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.tolerations }} +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/preupgrade-job.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/preupgrade-job.yaml new file mode 100644 index 00000000000..ef0fe02f431 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/preupgrade-job.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.preUpgradeChecker.jobEnabled .Values.preUpgradeChecker.upgradeVersionCheck}} +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed + name: longhorn-pre-upgrade + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-pre-upgrade + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-pre-upgrade + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - pre-upgrade + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.tolerations }} +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/priorityclass.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/priorityclass.yaml new file mode 100644 index 00000000000..208adc84a2b --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/priorityclass.yaml @@ -0,0 +1,9 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: "longhorn-critical" + labels: {{- include "longhorn.labels" . | nindent 4 }} +description: "Ensure Longhorn pods have the highest priority to prevent any unexpected eviction by the Kubernetes scheduler under node pressure" +globalDefault: false +preemptionPolicy: PreemptLowerPriority +value: 1000000000 diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/psp.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/psp.yaml new file mode 100644 index 00000000000..a2dfc05bef3 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/psp.yaml @@ -0,0 +1,66 @@ +{{- if .Values.enablePSP }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: longhorn-psp + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - NET_RAW + allowedCapabilities: + - SYS_ADMIN + hostNetwork: false + hostIPC: false + hostPID: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + fsGroup: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - secret + - projected + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: longhorn-psp-role + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . }} +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - longhorn-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: longhorn-psp-binding + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: longhorn-psp-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: default + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/registry-secret.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/registry-secret.yaml new file mode 100644 index 00000000000..3c6b1dc510b --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/registry-secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.privateRegistry.createSecret }} +{{- if .Values.privateRegistry.registrySecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.privateRegistry.registrySecret }} + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "secret" . }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/serviceaccount.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/serviceaccount.yaml new file mode 100644 index 00000000000..b0d6dd505b8 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/serviceaccount.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-ui-service-account + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.openshift.enabled }} + {{- if .Values.openshift.ui.route }} + {{- if not .Values.serviceAccount.annotations }} + annotations: + {{- end }} + serviceaccounts.openshift.io/oauth-redirectreference.primary: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"longhorn-ui"}}' + {{- end }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-support-bundle + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} \ No newline at end of file diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/servicemonitor.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/servicemonitor.yaml new file mode 100644 index 00000000000..fd11fe9d47e --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/servicemonitor.yaml @@ -0,0 +1,19 @@ +{{- if .Values.metrics.serviceMonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: longhorn-prometheus-servicemonitor + namespace: {{ include "release_namespace" . }} + labels: + {{- include "longhorn.labels" . | nindent 4 }} + name: longhorn-prometheus-servicemonitor +spec: + selector: + matchLabels: + app: longhorn-manager + namespaceSelector: + matchNames: + - {{ include "release_namespace" . 
}} + endpoints: + - port: manager +{{- end }} \ No newline at end of file diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/services.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/services.yaml new file mode 100644 index 00000000000..8baef021f35 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/services.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-conversion-webhook + name: longhorn-conversion-webhook + namespace: {{ include "release_namespace" . }} +spec: + type: ClusterIP + selector: + app: longhorn-manager + ports: + - name: conversion-webhook + port: 9501 + targetPort: conversion-wh +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-admission-webhook + name: longhorn-admission-webhook + namespace: {{ include "release_namespace" . }} +spec: + type: ClusterIP + selector: + app: longhorn-manager + ports: + - name: admission-webhook + port: 9502 + targetPort: admission-wh +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-recovery-backend + name: longhorn-recovery-backend + namespace: {{ include "release_namespace" . }} +spec: + type: ClusterIP + selector: + app: longhorn-manager + ports: + - name: recovery-backend + port: 9503 + targetPort: recov-backend +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + name: longhorn-engine-manager + namespace: {{ include "release_namespace" . }} +spec: + clusterIP: None + selector: + longhorn.io/component: instance-manager + longhorn.io/instance-manager-type: engine +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + name: longhorn-replica-manager + namespace: {{ include "release_namespace" . }} +spec: + clusterIP: None + selector: + longhorn.io/component: instance-manager + longhorn.io/instance-manager-type: replica diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/storageclass.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/storageclass.yaml new file mode 100644 index 00000000000..f79699f5e0f --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/storageclass.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-storageclass + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + annotations: + storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}" + staleReplicaTimeout: "30" + fromBackup: "" + {{- if .Values.persistence.defaultFsType }} + fsType: "{{ .Values.persistence.defaultFsType }}" + {{- end }} + {{- if .Values.persistence.defaultMkfsParams }} + mkfsParams: "{{ .Values.persistence.defaultMkfsParams }}" + {{- end }} + {{- if .Values.persistence.migratable }} + migratable: "{{ .Values.persistence.migratable }}" + {{- end }} + {{- if .Values.persistence.nfsOptions }} + nfsOptions: "{{ .Values.persistence.nfsOptions }}" + {{- end }} + {{- if .Values.persistence.backingImage.enable }} + backingImage: {{ .Values.persistence.backingImage.name }} + backingImageDataSourceType: {{ .Values.persistence.backingImage.dataSourceType }} + backingImageDataSourceParameters: {{ .Values.persistence.backingImage.dataSourceParameters }} + backingImageChecksum: {{ .Values.persistence.backingImage.expectedChecksum }} + {{- end }} + {{- if .Values.persistence.recurringJobSelector.enable }} + recurringJobSelector: '{{ .Values.persistence.recurringJobSelector.jobList }}' + {{- end }} + dataLocality: {{ .Values.persistence.defaultDataLocality | quote }} + {{- if .Values.persistence.defaultNodeSelector.enable }} + nodeSelector: "{{ .Values.persistence.defaultNodeSelector.selector }}" + {{- end }} + {{- if .Values.persistence.removeSnapshotsDuringFilesystemTrim }} + unmapMarkSnapChainRemoved: "{{ .Values.persistence.removeSnapshotsDuringFilesystemTrim }}" + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/tls-secrets.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/tls-secrets.yaml new file mode 100644 index 00000000000..74c43426de1 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/tls-secrets.yaml @@ -0,0 +1,16 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "release_namespace" $ }} + labels: {{- include "longhorn.labels" $ | nindent 4 }} + app: longhorn +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/uninstall-job.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/uninstall-job.yaml new file mode 100644 index 00000000000..968f4206164 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/uninstall-job.yaml @@ -0,0 +1,57 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + name: longhorn-uninstall + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-uninstall + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-uninstall + image: {{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - longhorn-manager + - uninstall + - --force + env: + - name: LONGHORN_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: Never + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote }} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if or .Values.longhornManager.tolerations .Values.global.cattle.windowsCluster.enabled }} + tolerations: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.tolerations }} +{{ toYaml .Values.global.cattle.windowsCluster.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.tolerations }} +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.longhornManager.nodeSelector .Values.global.cattle.windowsCluster.enabled }} + nodeSelector: + {{- if and .Values.global.cattle.windowsCluster.enabled .Values.global.cattle.windowsCluster.nodeSelector }} +{{ toYaml .Values.global.cattle.windowsCluster.nodeSelector | indent 8 }} + {{- end }} + {{- if or .Values.longhornManager.nodeSelector }} +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} + {{- end }} diff --git a/deploy/charts/harvester/dependency_charts/longhorn/templates/validate-psp-install.yaml b/deploy/charts/harvester/dependency_charts/longhorn/templates/validate-psp-install.yaml new file mode 100644 index 00000000000..0df98e3657c --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/templates/validate-psp-install.yaml @@ -0,0 +1,7 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +#{{- if .Values.enablePSP }} +#{{- if not (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +#{{- fail "The target cluster does not have the PodSecurityPolicy API resource. Please disable PSPs in this chart before proceeding." -}} +#{{- end }} +#{{- end }} +#{{- end }} \ No newline at end of file diff --git a/deploy/charts/harvester/dependency_charts/longhorn/values.yaml b/deploy/charts/harvester/dependency_charts/longhorn/values.yaml new file mode 100644 index 00000000000..5c00241f5d3 --- /dev/null +++ b/deploy/charts/harvester/dependency_charts/longhorn/values.yaml @@ -0,0 +1,487 @@ +# Default values for longhorn. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + cattle: + # -- Default system registry. + systemDefaultRegistry: "" + windowsCluster: + # -- Setting that allows Longhorn to run on a Rancher Windows cluster. + enabled: false + # -- Toleration for Linux nodes that can run user-deployed Longhorn components. + tolerations: + - key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" + # -- Node selector for Linux nodes that can run user-deployed Longhorn components. + nodeSelector: + kubernetes.io/os: "linux" + defaultSetting: + # -- Toleration for system-managed Longhorn components. + taintToleration: cattle.io/os=linux:NoSchedule + # -- Node selector for system-managed Longhorn components. 
+ systemManagedComponentsNodeSelector: kubernetes.io/os:linux + +networkPolicies: + # -- Setting that allows you to enable network policies that control access to Longhorn pods. + enabled: false + # -- Distribution that determines the policy for allowing access for an ingress. (Options: "k3s", "rke2", "rke1") + type: "k3s" + +image: + longhorn: + engine: + # -- Repository for the Longhorn Engine image. + repository: longhornio/longhorn-engine + # -- Tag for the Longhorn Engine image. + tag: master-head + manager: + # -- Repository for the Longhorn Manager image. + repository: longhornio/longhorn-manager + # -- Tag for the Longhorn Manager image. + tag: master-head + ui: + # -- Repository for the Longhorn UI image. + repository: longhornio/longhorn-ui + # -- Tag for the Longhorn UI image. + tag: master-head + instanceManager: + # -- Repository for the Longhorn Instance Manager image. + repository: longhornio/longhorn-instance-manager + # -- Tag for the Longhorn Instance Manager image. + tag: master-head + shareManager: + # -- Repository for the Longhorn Share Manager image. + repository: longhornio/longhorn-share-manager + # -- Tag for the Longhorn Share Manager image. + tag: master-head + backingImageManager: + # -- Repository for the Backing Image Manager image. When unspecified, Longhorn uses the default value. + repository: longhornio/backing-image-manager + # -- Tag for the Backing Image Manager image. When unspecified, Longhorn uses the default value. + tag: master-head + supportBundleKit: + # -- Repository for the Longhorn Support Bundle Manager image. + repository: longhornio/support-bundle-kit + # -- Tag for the Longhorn Support Bundle Manager image. + tag: v0.0.36 + csi: + attacher: + # -- Repository for the CSI attacher image. When unspecified, Longhorn uses the default value. + repository: longhornio/csi-attacher + # -- Tag for the CSI attacher image. When unspecified, Longhorn uses the default value. + tag: v4.4.2 + provisioner: + # -- Repository for the CSI Provisioner image. When unspecified, Longhorn uses the default value. + repository: longhornio/csi-provisioner + # -- Tag for the CSI Provisioner image. When unspecified, Longhorn uses the default value. + tag: v3.6.2 + nodeDriverRegistrar: + # -- Repository for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. + repository: longhornio/csi-node-driver-registrar + # -- Tag for the CSI Node Driver Registrar image. When unspecified, Longhorn uses the default value. + tag: v2.9.2 + resizer: + # -- Repository for the CSI Resizer image. When unspecified, Longhorn uses the default value. + repository: longhornio/csi-resizer + # -- Tag for the CSI Resizer image. When unspecified, Longhorn uses the default value. + tag: v1.9.2 + snapshotter: + # -- Repository for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. + repository: longhornio/csi-snapshotter + # -- Tag for the CSI Snapshotter image. When unspecified, Longhorn uses the default value. + tag: v6.3.2 + livenessProbe: + # -- Repository for the CSI liveness probe image. When unspecified, Longhorn uses the default value. + repository: longhornio/livenessprobe + # -- Tag for the CSI liveness probe image. When unspecified, Longhorn uses the default value. + tag: v2.12.0 + openshift: + oauthProxy: + # -- Repository for the OAuth Proxy image. This setting applies only to OpenShift users. + repository: quay.io/openshift/origin-oauth-proxy + # -- Tag for the OAuth Proxy image. 
This setting applies only to OpenShift users. Specify OCP/OKD version 4.1 or later. The latest stable version is 4.14. + tag: 4.14 + # -- Image pull policy that applies to all user-deployed Longhorn components, such as Longhorn Manager, Longhorn driver, and Longhorn UI. + pullPolicy: IfNotPresent + +service: + ui: + # -- Service type for Longhorn UI. (Options: "ClusterIP", "NodePort", "LoadBalancer", "Rancher-Proxy") + type: ClusterIP + # -- NodePort port number for Longhorn UI. When unspecified, Longhorn selects a free port between 30000 and 32767. + nodePort: null + manager: + # -- Service type for Longhorn Manager. + type: ClusterIP + # -- NodePort port number for Longhorn Manager. When unspecified, Longhorn selects a free port between 30000 and 32767. + nodePort: "" + +persistence: + # -- Setting that allows you to specify the default Longhorn StorageClass. + defaultClass: true + # -- Filesystem type of the default Longhorn StorageClass. + defaultFsType: ext4 + # -- mkfs parameters of the default Longhorn StorageClass. + defaultMkfsParams: "" + # -- Replica count of the default Longhorn StorageClass. + defaultClassReplicaCount: 3 + # -- Data locality of the default Longhorn StorageClass. (Options: "disabled", "best-effort") + defaultDataLocality: disabled + # -- Reclaim policy that provides instructions for handling of a volume after its claim is released. (Options: "Retain", "Delete") + reclaimPolicy: Delete + # -- Setting that allows you to enable live migration of a Longhorn volume from one node to another. + migratable: false + # -- Set NFS mount options for Longhorn StorageClass for RWX volumes + nfsOptions: "" + recurringJobSelector: + # -- Setting that allows you to enable the recurring job selector for a Longhorn StorageClass. + enable: false + # -- Recurring job selector for a Longhorn StorageClass. Ensure that quotes are used correctly when specifying job parameters. (Example: `[{"name":"backup", "isGroup":true}]`) + jobList: [] + backingImage: + # -- Setting that allows you to use a backing image in a Longhorn StorageClass. + enable: false + # -- Backing image to be used for creating and restoring volumes in a Longhorn StorageClass. When no backing images are available, specify the data source type and parameters that Longhorn can use to create a backing image. + name: ~ + # -- Data source type of a backing image used in a Longhorn StorageClass. + # If the backing image exists in the cluster, Longhorn uses this setting to verify the image. + # If the backing image does not exist, Longhorn creates one using the specified data source type. + dataSourceType: ~ + # -- Data source parameters of a backing image used in a Longhorn StorageClass. + # You can specify a JSON string of a map. (Example: `'{\"url\":\"https://backing-image-example.s3-region.amazonaws.com/test-backing-image\"}'`) + dataSourceParameters: ~ + # -- Expected SHA-512 checksum of a backing image used in a Longhorn StorageClass. + expectedChecksum: ~ + defaultNodeSelector: + # -- Setting that allows you to enable the node selector for the default Longhorn StorageClass. + enable: false + # -- Node selector for the default Longhorn StorageClass. Longhorn uses only nodes with the specified tags for storing volume data. (Examples: "storage,fast") + selector: "" + # -- Setting that allows you to enable automatic snapshot removal during filesystem trim for a Longhorn StorageClass. 
(Options: "ignored", "enabled", "disabled") + removeSnapshotsDuringFilesystemTrim: ignored + +preUpgradeChecker: + # -- Setting that allows Longhorn to perform pre-upgrade checks. Disable this setting when installing Longhorn using Argo CD or other GitOps solutions. + jobEnabled: true + # -- Setting that allows Longhorn to perform upgrade version checks after starting the Longhorn Manager DaemonSet Pods. Disabling this setting also disables `preUpgradeChecker.jobEnabled`. Longhorn recommends keeping this setting enabled. + upgradeVersionCheck: true + +csi: + # -- kubelet root directory. When unspecified, Longhorn uses the default value. + kubeletRootDir: ~ + # -- Replica count of the CSI Attacher. When unspecified, Longhorn uses the default value ("3"). + attacherReplicaCount: ~ + # -- Replica count of the CSI Provisioner. When unspecified, Longhorn uses the default value ("3"). + provisionerReplicaCount: ~ + # -- Replica count of the CSI Resizer. When unspecified, Longhorn uses the default value ("3"). + resizerReplicaCount: ~ + # -- Replica count of the CSI Snapshotter. When unspecified, Longhorn uses the default value ("3"). + snapshotterReplicaCount: ~ + +defaultSettings: + # -- Endpoint used to access the backupstore. (Options: "NFS", "CIFS", "AWS", "GCP", "AZURE") + backupTarget: ~ + # -- Name of the Kubernetes secret associated with the backup target. + backupTargetCredentialSecret: ~ + # -- Setting that allows Longhorn to automatically attach a volume and create snapshots or backups when recurring jobs are run. + allowRecurringJobWhileVolumeDetached: ~ + # -- Setting that allows Longhorn to automatically create a default disk only on nodes with the label "node.longhorn.io/create-default-disk=true" (if no other disks exist). When this setting is disabled, Longhorn creates a default disk on each node that is added to the cluster. + createDefaultDiskLabeledNodes: ~ + # -- Default path for storing data on a host. The default value is "/var/lib/longhorn/". + defaultDataPath: ~ + # -- Default data locality. A Longhorn volume has data locality if a local replica of the volume exists on the same node as the pod that is using the volume. + defaultDataLocality: ~ + # -- Setting that allows scheduling on nodes with healthy replicas of the same volume. This setting is disabled by default. + replicaSoftAntiAffinity: ~ + # -- Setting that automatically rebalances replicas when an available node is discovered. + replicaAutoBalance: ~ + # -- Percentage of storage that can be allocated relative to hard drive capacity. The default value is "100". + storageOverProvisioningPercentage: ~ + # -- Percentage of minimum available disk capacity. When the minimum available capacity exceeds the total available capacity, the disk becomes unschedulable until more space is made available for use. The default value is "25". + storageMinimalAvailablePercentage: ~ + # -- Percentage of disk space that is not allocated to the default disk on each new Longhorn node. + storageReservedPercentageForDefaultDisk: ~ + # -- Upgrade Checker that periodically checks for new Longhorn versions. When a new version is available, a notification appears on the Longhorn UI. This setting is enabled by default + upgradeChecker: ~ + # -- Default number of replicas for volumes created using the Longhorn UI. For Kubernetes configuration, modify the `numberOfReplicas` field in the StorageClass. The default value is "3". + defaultReplicaCount: ~ + # -- Default Longhorn StorageClass. 
"storageClassName" is assigned to PVs and PVCs that are created for an existing Longhorn volume. "storageClassName" can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. The default value is "longhorn-static". + defaultLonghornStaticStorageClass: ~ + # -- Number of seconds that Longhorn waits before checking the backupstore for new backups. The default value is "300". When the value is "0", polling is disabled. + backupstorePollInterval: ~ + # -- Number of minutes that Longhorn keeps a failed backup resource. When the value is "0", automatic deletion is disabled. + failedBackupTTL: ~ + # -- Setting that restores recurring jobs from a backup volume on a backup target and creates recurring jobs if none exist during backup restoration. + restoreVolumeRecurringJobs: ~ + # -- Maximum number of successful recurring backup and snapshot jobs to be retained. When the value is "0", a history of successful recurring jobs is not retained. + recurringSuccessfulJobsHistoryLimit: ~ + # -- Maximum number of failed recurring backup and snapshot jobs to be retained. When the value is "0", a history of failed recurring jobs is not retained. + recurringFailedJobsHistoryLimit: ~ + # -- Maximum number of snapshots or backups to be retained. + recurringJobMaxRetention: ~ + # -- Maximum number of failed support bundles that can exist in the cluster. When the value is "0", Longhorn automatically purges all failed support bundles. + supportBundleFailedHistoryLimit: ~ + # -- Taint or toleration for system-managed Longhorn components. + taintToleration: ~ + # -- Node selector for system-managed Longhorn components. + systemManagedComponentsNodeSelector: ~ + # -- PriorityClass for system-managed Longhorn components. + # This setting can help prevent Longhorn components from being evicted under Node Pressure. + # Notice that this will be applied to Longhorn user-deployed components by default if there are no priority class values set yet, such as `longhornManager.priorityClass`. + priorityClass: &defaultPriorityClassNameRef "longhorn-critical" + # -- Setting that allows Longhorn to automatically salvage volumes when all replicas become faulty (for example, when the network connection is interrupted). Longhorn determines which replicas are usable and then uses these replicas for the volume. This setting is enabled by default. + autoSalvage: ~ + # -- Setting that allows Longhorn to automatically delete a workload pod that is managed by a controller (for example, daemonset) whenever a Longhorn volume is detached unexpectedly (for example, during Kubernetes upgrades). After deletion, the controller restarts the pod and then Kubernetes handles volume reattachment and remounting. + autoDeletePodWhenVolumeDetachedUnexpectedly: ~ + # -- Setting that prevents Longhorn Manager from scheduling replicas on a cordoned Kubernetes node. This setting is enabled by default. + disableSchedulingOnCordonedNode: ~ + # -- Setting that allows Longhorn to schedule new replicas of a volume to nodes in the same zone as existing healthy replicas. Nodes that do not belong to any zone are treated as existing in the zone that contains healthy replicas. When identifying zones, Longhorn relies on the label "topology.kubernetes.io/zone=" in the Kubernetes node object. + replicaZoneSoftAntiAffinity: ~ + # -- Setting that allows scheduling on disks with existing healthy replicas of the same volume. This setting is enabled by default. 
+ replicaDiskSoftAntiAffinity: ~ + # -- Policy that defines the action Longhorn takes when a volume is stuck with a StatefulSet or Deployment pod on a node that failed. + nodeDownPodDeletionPolicy: ~ + # -- Policy that defines the action Longhorn takes when a node with the last healthy replica of a volume is drained. + nodeDrainPolicy: ~ + # -- Setting that allows automatic detaching of manually-attached volumes when a node is cordoned. + detachManuallyAttachedVolumesWhenCordoned: ~ + # -- Number of seconds that Longhorn waits before reusing existing data on a failed replica instead of creating a new replica of a degraded volume. + replicaReplenishmentWaitInterval: ~ + # -- Maximum number of replicas that can be concurrently rebuilt on each node. + concurrentReplicaRebuildPerNodeLimit: ~ + # -- Maximum number of volumes that can be concurrently restored on each node using a backup. When the value is "0", restoration of volumes using a backup is disabled. + concurrentVolumeBackupRestorePerNodeLimit: ~ + # -- Setting that disables the revision counter and thereby prevents Longhorn from tracking all write operations to a volume. When salvaging a volume, Longhorn uses properties of the "volume-head-xxx.img" file (the last file size and the last time the file was modified) to select the replica to be used for volume recovery. This setting applies only to volumes created using the Longhorn UI. + disableRevisionCounter: ~ + # -- Image pull policy for system-managed pods, such as Instance Manager, engine images, and CSI Driver. Changes to the image pull policy are applied only after the system-managed pods restart. + systemManagedPodsImagePullPolicy: ~ + # -- Setting that allows you to create and attach a volume without having all replicas scheduled at the time of creation. + allowVolumeCreationWithDegradedAvailability: ~ + # -- Setting that allows Longhorn to automatically clean up the system-generated snapshot after replica rebuilding is completed. + autoCleanupSystemGeneratedSnapshot: ~ + # -- Setting that allows Longhorn to automatically clean up the snapshot generated by a recurring backup job. + autoCleanupRecurringJobBackupSnapshot: ~ + # -- Maximum number of engines that are allowed to concurrently upgrade on each node after Longhorn Manager is upgraded. When the value is "0", Longhorn does not automatically upgrade volume engines to the new default engine image version. + concurrentAutomaticEngineUpgradePerNodeLimit: ~ + # -- Number of minutes that Longhorn waits before cleaning up the backing image file when no replicas in the disk are using it. + backingImageCleanupWaitInterval: ~ + # -- Number of seconds that Longhorn waits before downloading a backing image file again when the status of all image disk files changes to "failed" or "unknown". + backingImageRecoveryWaitInterval: ~ + # -- Percentage of the total allocatable CPU resources on each node to be reserved for each instance manager pod when the V1 Data Engine is enabled. The default value is "12". + guaranteedInstanceManagerCPU: ~ + # -- Setting that notifies Longhorn that the cluster is using the Kubernetes Cluster Autoscaler. + kubernetesClusterAutoscalerEnabled: ~ + # -- Setting that allows Longhorn to automatically delete an orphaned resource and the corresponding data (for example, stale replicas). Orphaned resources on failed or unknown nodes are not automatically cleaned up. + orphanAutoDeletion: ~ + # -- Storage network for in-cluster traffic. When unspecified, Longhorn uses the Kubernetes cluster network. 
+ storageNetwork: ~ + # -- Flag that prevents accidental uninstallation of Longhorn. + deletingConfirmationFlag: ~ + # -- Timeout between the Longhorn Engine and replicas. Specify a value between "8" and "30" seconds. The default value is "8". + engineReplicaTimeout: ~ + # -- Setting that allows you to enable and disable snapshot hashing and data integrity checks. + snapshotDataIntegrity: ~ + # -- Setting that allows disabling of snapshot hashing after snapshot creation to minimize impact on system performance. + snapshotDataIntegrityImmediateCheckAfterSnapshotCreation: ~ + # -- Setting that defines when Longhorn checks the integrity of data in snapshot disk files. You must use the Unix cron expression format. + snapshotDataIntegrityCronjob: ~ + # -- Setting that allows Longhorn to automatically mark the latest snapshot and its parent files as removed during a filesystem trim. Longhorn does not remove snapshots containing multiple child files. + removeSnapshotsDuringFilesystemTrim: ~ + # -- Setting that allows fast rebuilding of replicas using the checksum of snapshot disk files. Before enabling this setting, you must set the snapshot-data-integrity value to "enable" or "fast-check". + fastReplicaRebuildEnabled: ~ + # -- Number of seconds that an HTTP client waits for a response from a File Sync server before considering the connection to have failed. + replicaFileSyncHttpClientTimeout: ~ + # -- Log levels that indicate the type and severity of logs in Longhorn Manager. The default value is "Info". (Options: "Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace") + logLevel: ~ + # -- Setting that allows you to specify a backup compression method. + backupCompressionMethod: ~ + # -- Maximum number of worker threads that can concurrently run for each backup. + backupConcurrentLimit: ~ + # -- Maximum number of worker threads that can concurrently run for each restore operation. + restoreConcurrentLimit: ~ + # -- Setting that allows you to enable the V1 Data Engine. + v1DataEngine: ~ + # -- Setting that allows you to enable the V2 Data Engine, which is based on the Storage Performance Development Kit (SPDK). The V2 Data Engine is a preview feature and should not be used in production environments. + v2DataEngine: ~ + # -- Setting that allows you to configure maximum huge page size (in MiB) for the V2 Data Engine. + v2DataEngineHugepageLimit: ~ + # -- Setting that allows rebuilding of offline replicas for volumes using the V2 Data Engine. + offlineReplicaRebuilding: ~ + # -- Number of millicpus on each node to be reserved for each Instance Manager pod when the V2 Data Engine is enabled. The default value is "1250". + v2DataEngineGuaranteedInstanceManagerCPU: ~ + # -- Setting that allows scheduling of empty node selector volumes to any node. + allowEmptyNodeSelectorVolume: ~ + # -- Setting that allows scheduling of empty disk selector volumes to any disk. + allowEmptyDiskSelectorVolume: ~ + # -- Setting that allows Longhorn to periodically collect anonymous usage data for product improvement purposes. Longhorn sends collected data to the [Upgrade Responder](https://github.com/longhorn/upgrade-responder) server, which is the data source of the Longhorn Public Metrics Dashboard (https://metrics.longhorn.io). The Upgrade Responder server does not store data that can be used to identify clients, including IP addresses. + allowCollectingLonghornUsageMetrics: ~ + # -- Setting that temporarily prevents all attempts to purge volume snapshots. 
+ disableSnapshotPurge: ~ + # -- Maximum snapshot count for a volume. The value should be between 2 to 250 + snapshotMaxCount: ~ + # -- Setting that allows you to configure the log level of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. + v2DataEngineLogLevel: ~ + # Setting that allows you to configure the log flags of the SPDK target daemon (spdk_tgt) of the V2 Data Engine. + v2DataEngineLogFlags: ~ + +privateRegistry: + # -- Setting that allows you to create a private registry secret. + createSecret: ~ + # -- URL of a private registry. When unspecified, Longhorn uses the default system registry. + registryUrl: ~ + # -- User account used for authenticating with a private registry. + registryUser: ~ + # -- Password for authenticating with a private registry. + registryPasswd: ~ + # -- Kubernetes secret that allows you to pull images from a private registry. This setting applies only when creation of private registry secrets is enabled. You must include the private registry name in the secret name. + registrySecret: ~ + +longhornManager: + log: + # -- Format of Longhorn Manager logs. (Options: "plain", "json") + format: plain + # -- PriorityClass for Longhorn Manager. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn Manager on nodes allowed to run Longhorn Manager. + tolerations: [] + ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn Manager. Specify the nodes allowed to run Longhorn Manager. + nodeSelector: {} + ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + # -- Annotation for the Longhorn Manager service. + serviceAnnotations: {} + ## If you want to set annotations for the Longhorn Manager service, delete the `{}` in the line above + ## and uncomment this example block + # annotation-key1: "annotation-value1" + # annotation-key2: "annotation-value2" + +longhornDriver: + # -- PriorityClass for Longhorn Driver. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn Driver on nodes allowed to run Longhorn components. + tolerations: [] + ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn Driver. Specify the nodes allowed to run Longhorn Driver. + nodeSelector: {} + ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornUI: + # -- Replica count for Longhorn UI. + replicas: 2 + # -- PriorityClass for Longhorn UI. + priorityClass: *defaultPriorityClassNameRef + # -- Toleration for Longhorn UI on nodes allowed to run Longhorn components. + tolerations: [] + ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # -- Node selector for Longhorn UI. Specify the nodes allowed to run Longhorn UI. 
+ nodeSelector: {} + ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +ingress: + # -- Setting that allows Longhorn to generate ingress records for the Longhorn UI service. + enabled: false + + # -- IngressClass resource that contains ingress configuration, including the name of the Ingress controller. + # ingressClassName can replace the kubernetes.io/ingress.class annotation used in earlier Kubernetes releases. + ingressClassName: ~ + + # -- Hostname of the Layer 7 load balancer. + host: sslip.io + + # -- Setting that allows you to enable TLS on ingress records. + tls: false + + # -- Setting that allows you to enable secure connections to the Longhorn UI service via port 443. + secureBackends: false + + # -- TLS secret that contains the private key and certificate to be used for TLS. This setting applies only when TLS is enabled on ingress records. + tlsSecret: longhorn.local-tls + + # -- Default ingress path. You can access the Longhorn UI by following the full ingress path {{host}}+{{path}}. + path: / + + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + # -- Ingress annotations in the form of key-value pairs. + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + # -- Secret that contains a TLS private key and certificate. Use secrets if you want to use your own certificates to secure ingresses. + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: longhorn.local-tls + # key: + # certificate: + +# -- Setting that allows you to enable pod security policies (PSPs) that allow privileged Longhorn pods to start. This setting applies only to clusters running Kubernetes 1.25 and earlier, and with the built-in Pod Security admission controller enabled. +enablePSP: false + +# -- Specify override namespace, specifically this is useful for using longhorn as sub-chart and its release namespace is not the `longhorn-system`. +namespaceOverride: "" + +# -- Annotation for the Longhorn Manager DaemonSet pods. This setting is optional. +annotations: {} + +serviceAccount: + # -- Annotations to add to the service account + annotations: {} + +metrics: + serviceMonitor: + # -- Setting that allows the creation of a Prometheus ServiceMonitor resource for Longhorn Manager components. + enabled: false + +## openshift settings +openshift: + # -- Setting that allows Longhorn to integrate with OpenShift. + enabled: false + ui: + # -- Route for connections between Longhorn and the OpenShift web console. + route: "longhorn-ui" + # -- Port for accessing the OpenShift web console. 
+ port: 443 + # -- Port for proxy that provides access to the OpenShift web console. + proxy: 8443 + +# -- Setting that allows Longhorn to generate code coverage profiles. +enableGoCoverDir: false diff --git a/go.mod b/go.mod index 0818e0a2482..a37c2940919 100644 --- a/go.mod +++ b/go.mod @@ -57,7 +57,7 @@ require ( github.com/ehazlett/simplelog v0.0.0-20200226020431-d374894e92a4 github.com/emicklei/go-restful/v3 v3.11.3 github.com/gobuffalo/flect v1.0.2 - github.com/gorilla/mux v1.8.0 + github.com/gorilla/mux v1.8.1 github.com/guonaihong/gout v0.1.3 github.com/harvester/harvester-network-controller v0.3.1 github.com/harvester/node-manager v0.1.5-0.20230614075852-de2da3ef3aca @@ -67,8 +67,8 @@ require ( github.com/kube-logging/logging-operator/pkg/sdk v0.9.1 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/kubernetes/dashboard v1.10.1 - github.com/longhorn/backupstore v0.0.0-20240110081942-bd231cfb0c7b - github.com/longhorn/longhorn-manager v1.6.0 + github.com/longhorn/backupstore v0.0.0-20240219094812-3a87ee02df77 + github.com/longhorn/longhorn-manager v1.7.0-dev.0.20240402173422-6df051064711 github.com/mattn/go-isatty v0.0.17 github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 github.com/mitchellh/mapstructure v1.5.0 @@ -80,19 +80,19 @@ require ( github.com/rancher/apiserver v0.0.0-20230120214941-e88c32739dc7 github.com/rancher/dynamiclistener v0.3.6 github.com/rancher/fleet/pkg/apis v0.0.0-20230123175930-d296259590be - github.com/rancher/lasso v0.0.0-20230830164424-d684fdeb6f29 + github.com/rancher/lasso v0.0.0-20240123150939-7055397d6dfa github.com/rancher/norman v0.0.0-20221205184727-32ef2e185b99 github.com/rancher/rancher v0.0.0-20230124173128-2207cfed1803 github.com/rancher/rancher/pkg/apis v0.0.0 github.com/rancher/steve v0.0.0-20221209194631-acf9d31ce0dd github.com/rancher/system-upgrade-controller/pkg/apis v0.0.0-20230803010539-04a0b9ef5858 - github.com/rancher/wrangler v1.1.1 + github.com/rancher/wrangler v1.1.2 github.com/shopspring/decimal v1.3.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.9.0 github.com/tidwall/gjson v1.9.3 - github.com/urfave/cli v1.22.13 + github.com/urfave/cli v1.22.14 go.uber.org/multierr v1.11.0 golang.org/x/crypto v0.21.0 golang.org/x/net v0.22.0 @@ -101,9 +101,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.9.4 - k8s.io/api v0.28.5 + k8s.io/api v0.28.6 k8s.io/apiextensions-apiserver v0.26.10 - k8s.io/apimachinery v0.28.5 + k8s.io/apimachinery v0.29.3 k8s.io/apiserver v0.28.5 k8s.io/client-go v12.0.0+incompatible k8s.io/component-helpers v0.28.5 @@ -111,7 +111,7 @@ require ( k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 k8s.io/kubectl v0.25.0 k8s.io/kubelet v0.26.13 - k8s.io/utils v0.0.0-20230726121419-3b25d923346b + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 kubevirt.io/api v1.1.1 kubevirt.io/client-go v1.1.1 kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 @@ -126,18 +126,18 @@ require ( require ( emperror.dev/errors v0.8.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Masterminds/squirrel v1.5.3 // indirect - github.com/RoaringBitmap/roaring v1.2.3 // indirect + github.com/RoaringBitmap/roaring v1.9.1 // 
indirect github.com/adrg/xdg v0.3.1 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/aws/aws-sdk-go v1.46.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.12.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 // indirect @@ -178,10 +178,10 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.2 // indirect + github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/google/btree v1.0.1 // indirect github.com/google/fscrypt v0.3.4 // indirect @@ -191,9 +191,9 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/gorilla/handlers v1.5.1 // indirect - github.com/gorilla/websocket v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/websocket v1.5.1 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect @@ -211,19 +211,18 @@ require ( github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.6 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/longhorn/backing-image-manager v1.6.0-dev-20231217.0.20240103150452-7f8aea1edd03 // indirect - github.com/longhorn/go-common-libs v0.0.0-20240109042507-23627e6416b7 // indirect - github.com/longhorn/go-iscsi-helper v0.0.0-20240103085736-72aee873888a // indirect - github.com/longhorn/go-spdk-helper v0.0.0-20240117135122-26f8acb2a13d // indirect - github.com/longhorn/longhorn-engine v1.6.0-dev-20240105.0.20240110095344-deb8b18a1558 // indirect - github.com/longhorn/longhorn-instance-manager v1.6.0-rc2.0.20240126090453-5a27dd0e4a81 // indirect - github.com/longhorn/longhorn-share-manager v1.6.0-dev-20231217.0.20231226052309-99d57c1695ea // indirect - github.com/longhorn/longhorn-spdk-engine v0.0.0-20240123044045-c5f14845bd83 // indirect + github.com/longhorn/backing-image-manager v1.7.0-dev.0.20240326182459-c5288d745f4a // indirect + github.com/longhorn/go-common-libs v0.0.0-20240319112414-b75404dc7fbc // indirect + github.com/longhorn/go-iscsi-helper v0.0.0-20240308033847-bc3aab599425 // indirect + github.com/longhorn/go-spdk-helper v0.0.0-20240328085119-7ab2393959d9 // indirect + github.com/longhorn/longhorn-engine v1.6.0 // indirect + github.com/longhorn/longhorn-instance-manager v1.7.0-dev.0.20240308123529-28ec1b59c683 // indirect + github.com/longhorn/longhorn-share-manager v1.6.0 // indirect + github.com/longhorn/longhorn-spdk-engine v0.0.0-20240308061103-33861f26f36e // indirect 
github.com/machadovilaca/operator-observability v0.0.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.0 // indirect @@ -251,10 +250,10 @@ require ( github.com/pierrec/lz4/v4 v4.1.17 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/client_golang v1.17.0 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_golang v1.18.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rancher/aks-operator v1.0.7 // indirect github.com/rancher/eks-operator v1.1.5 // indirect github.com/rancher/gke-operator v1.1.4 // indirect @@ -266,7 +265,7 @@ require ( github.com/rubenv/sql-migrate v1.1.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.7 // indirect + github.com/shirou/gopsutil/v3 v3.24.2 // indirect github.com/slok/goresilience v0.2.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -276,7 +275,7 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.1.0 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect @@ -288,17 +287,17 @@ require ( go.opentelemetry.io/proto/otlp v0.19.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect golang.org/x/mod v0.16.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sys v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.19.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/grpc v1.62.1 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/cli-runtime v0.28.5 // indirect @@ -308,7 +307,7 @@ require ( k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-aggregator v0.26.4 // indirect 
k8s.io/kubernetes v1.28.5 // indirect - k8s.io/mount-utils v0.28.5 // indirect + k8s.io/mount-utils v0.29.3 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect oras.land/oras-go v1.2.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect diff --git a/go.sum b/go.sum index 619cbe7019a..65587680ccc 100644 --- a/go.sum +++ b/go.sum @@ -636,8 +636,8 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= @@ -670,8 +670,8 @@ github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= -github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= +github.com/RoaringBitmap/roaring v1.9.1 h1:LXcSqGGGMKm+KAzUyWn7ZeREqoOkoMX+KwLOK1thc4I= +github.com/RoaringBitmap/roaring v1.9.1/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -737,9 +737,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA= +github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod 
h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -934,7 +933,6 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= @@ -1160,8 +1158,8 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgR github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1199,8 +1197,9 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -1285,8 +1284,8 @@ github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1314,18 +1313,19 @@ github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU8 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -1531,26 +1531,26 @@ github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/longhorn/backing-image-manager v1.6.0-dev-20231217.0.20240103150452-7f8aea1edd03 h1:ZsWbMHBwM+fFewUunuyX8WlkAahL44qfbEUcHd73wmI= -github.com/longhorn/backing-image-manager v1.6.0-dev-20231217.0.20240103150452-7f8aea1edd03/go.mod h1:IH0mgbK+Dr13xkY+LhDaufyd9YIpiKqYo1AeRLFYGrk= -github.com/longhorn/backupstore v0.0.0-20240110081942-bd231cfb0c7b 
h1:euBbfDb6bnp8KQ5qmbvWWEG7KP6nxhUotLCPGU/x3v0= -github.com/longhorn/backupstore v0.0.0-20240110081942-bd231cfb0c7b/go.mod h1:4cbJWtlrD2cGTQxQLtdlPTYopiJiusXH7CpOBrn/s3k= -github.com/longhorn/go-common-libs v0.0.0-20240109042507-23627e6416b7 h1:t8F+BjCcn0qqHdVlGktqnEH5EOGigFnAFmVbQPe0nnY= -github.com/longhorn/go-common-libs v0.0.0-20240109042507-23627e6416b7/go.mod h1:nIECQARppamt2zwFSdzADRTVKo/7izFwWIS3VWi7D/s= -github.com/longhorn/go-iscsi-helper v0.0.0-20240103085736-72aee873888a h1:2MoRekaOOLU77EgqxGO/XEff2UlktC9mGnxE4rVAOgs= -github.com/longhorn/go-iscsi-helper v0.0.0-20240103085736-72aee873888a/go.mod h1:f5kDYhd77SGeNesKSzAlSmbEsTTf3xPUZDuLkoq7uxE= -github.com/longhorn/go-spdk-helper v0.0.0-20240117135122-26f8acb2a13d h1:Yqt7DL478ir9LwHmuRRXWzKKyzKbvPkGadCeRQ3pv1o= -github.com/longhorn/go-spdk-helper v0.0.0-20240117135122-26f8acb2a13d/go.mod h1:9nZ5HbwviggK6l792X4l9fTivEWmiK3sXFaroiRp2yw= -github.com/longhorn/longhorn-engine v1.6.0-dev-20240105.0.20240110095344-deb8b18a1558 h1:j2fUmyqimKGakd5WNf46bfgZV4q5IsgKzcl/oP7QRoA= -github.com/longhorn/longhorn-engine v1.6.0-dev-20240105.0.20240110095344-deb8b18a1558/go.mod h1:xRX2tg6Xaer0OonBnqA0wCruk/j51w3izR2J1+xHfiI= -github.com/longhorn/longhorn-instance-manager v1.6.0-rc2.0.20240126090453-5a27dd0e4a81 h1:VPGzIU7jv40X5zEBU947CdqxwV5N0e1PBhC+S7ty2cA= -github.com/longhorn/longhorn-instance-manager v1.6.0-rc2.0.20240126090453-5a27dd0e4a81/go.mod h1:ry4Ewu9vQVOFSIraAzxEyw0kUaDpbLLbOcFYUaEIrB0= -github.com/longhorn/longhorn-manager v1.6.0 h1:tIU9Dtl5564jIxPM9qaax8Fvbd+FSDX8HQYH7U5E4vE= -github.com/longhorn/longhorn-manager v1.6.0/go.mod h1:+v8jWi4y8leBGIdmUuopYIesx6o9Ih7bQC4JzFOCqaM= -github.com/longhorn/longhorn-share-manager v1.6.0-dev-20231217.0.20231226052309-99d57c1695ea h1:bYNEyl0yAh76fjzTQEo7+u2G80XaVi8aXfMlwyPiZmI= -github.com/longhorn/longhorn-share-manager v1.6.0-dev-20231217.0.20231226052309-99d57c1695ea/go.mod h1:sDo6d4FV5nALnXkZHRY31H6R6KTK0LNo0kMVULlhYiE= -github.com/longhorn/longhorn-spdk-engine v0.0.0-20240123044045-c5f14845bd83 h1:7onMWr23AzUC6C7uSpe0lfi0J2FPhrxtExGlYEyFCxc= -github.com/longhorn/longhorn-spdk-engine v0.0.0-20240123044045-c5f14845bd83/go.mod h1:+qobpc6BOqFxXjyEksxuSjfSI6BEJF7L630GWP9wyOk= +github.com/longhorn/backing-image-manager v1.7.0-dev.0.20240326182459-c5288d745f4a h1:Z/vdszQazSZJ51IlJvtEuDViNn6AShJx6xGcOwwa9Ho= +github.com/longhorn/backing-image-manager v1.7.0-dev.0.20240326182459-c5288d745f4a/go.mod h1:LuPH4YQ7+phuzSXEPT8b1/iBUsOvmnsCaogU4EImy+k= +github.com/longhorn/backupstore v0.0.0-20240219094812-3a87ee02df77 h1:iJRq59kA22f9HIjFtY/lz5rKCorZJrrYXju70XoWdmE= +github.com/longhorn/backupstore v0.0.0-20240219094812-3a87ee02df77/go.mod h1:4cbJWtlrD2cGTQxQLtdlPTYopiJiusXH7CpOBrn/s3k= +github.com/longhorn/go-common-libs v0.0.0-20240319112414-b75404dc7fbc h1:Eh9Npc5yBcVD8E4zVQIGUtC62HcfqevrHjQ2kh7fJ/E= +github.com/longhorn/go-common-libs v0.0.0-20240319112414-b75404dc7fbc/go.mod h1:ESTw7LYBF+dB5VndQNKXKrD6B9s/hF94lotGKXLovlM= +github.com/longhorn/go-iscsi-helper v0.0.0-20240308033847-bc3aab599425 h1:koSD52H0VkzJAh3OIZCdgQ9mqoRXklkeuhqmuwQ1WzU= +github.com/longhorn/go-iscsi-helper v0.0.0-20240308033847-bc3aab599425/go.mod h1:2aM6KBix3Khd56I4rihOBOOPOm0/YvYMjtr1KNclQsI= +github.com/longhorn/go-spdk-helper v0.0.0-20240328085119-7ab2393959d9 h1:spgyLW1ND0GZJkzFyfJvbvcjVtAwBU2Msg+Jy2Yy02k= +github.com/longhorn/go-spdk-helper v0.0.0-20240328085119-7ab2393959d9/go.mod h1:WGm84AyXymx7L0CqOw8KEr9okywD+Cj5xZi+eKeOoiU= +github.com/longhorn/longhorn-engine v1.6.0 h1:6CH2vvwCgFBIGW4TegcI79CL1Ego1nvLZIC3ioRjjdM= 
+github.com/longhorn/longhorn-engine v1.6.0/go.mod h1:Snkv3gy4AUOhZSYMI7g7lVX/OOB8DTo28eJwsEfbAwM= +github.com/longhorn/longhorn-instance-manager v1.7.0-dev.0.20240308123529-28ec1b59c683 h1:v7JKvBbdKKsqGV96oDY7eTOodbX8bLNyPlcOnI+vanM= +github.com/longhorn/longhorn-instance-manager v1.7.0-dev.0.20240308123529-28ec1b59c683/go.mod h1:paydDjcI9whUJd09/4QJQVDV9MSJDwXUD1872vhnREo= +github.com/longhorn/longhorn-manager v1.7.0-dev.0.20240402173422-6df051064711 h1:mDtWKgQCT/bTKajln/+CDQkVNnNEcMpK30XqlCL1NVI= +github.com/longhorn/longhorn-manager v1.7.0-dev.0.20240402173422-6df051064711/go.mod h1:VpyCicse68m62JU9IU97JValGeQO1me/aSX9mKDmoUc= +github.com/longhorn/longhorn-share-manager v1.6.0 h1:rmvwCWOeADdf67uw5Dsv7m5m0aFbIxERNk0rYl3DuVk= +github.com/longhorn/longhorn-share-manager v1.6.0/go.mod h1:akAmB/ZKonalweOYEhbg85X2vhAz+/sMzg779EFgPO8= +github.com/longhorn/longhorn-spdk-engine v0.0.0-20240308061103-33861f26f36e h1:OPtYaSleQOlf+VsCF3hzS96nYj1F+X0dwky+LvsLh1M= +github.com/longhorn/longhorn-spdk-engine v0.0.0-20240308061103-33861f26f36e/go.mod h1:mgXCx3ZJpaObXmOUgczwzuYySs5Wc7SQK6sJtIRR8Ho= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ -1618,8 +1618,6 @@ github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 h1:YocNLcTBdEdvY3iDK6jfWXvEaM5OCKkjxPKoJRdB3Gg= github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1849,8 +1847,8 @@ github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqr github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ 
-1858,8 +1856,8 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -1872,8 +1870,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1889,8 +1887,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200507164740-ecee9c8abfd1/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= @@ -1920,8 +1918,8 @@ github.com/rancher/lasso v0.0.0-20200427171700-e0509f89f319/go.mod h1:6Dw19z1lDI github.com/rancher/lasso v0.0.0-20200515155337-a34e1e26ad91/go.mod 
h1:G6Vv2aj6xB2YjTVagmu4NkhBvbE8nBcGykHRENH6arI= github.com/rancher/lasso v0.0.0-20200820172840-0e4cc0ef5cb0/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18= github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08/go.mod h1:9qZd/S8DqWzfKtjKGgSoHqGEByYmUE3qRaBaaAHwfEM= -github.com/rancher/lasso v0.0.0-20230830164424-d684fdeb6f29 h1:+kige/h8/LnzWgPjB5NUIHz/pWiW/lFpqcTUkN5uulY= -github.com/rancher/lasso v0.0.0-20230830164424-d684fdeb6f29/go.mod h1:kgk9kJVMj9FIrrXU0iyM6u/9Je4bEjPImqswkTVaKsQ= +github.com/rancher/lasso v0.0.0-20240123150939-7055397d6dfa h1:eRhvQJjIpPxJunlS3e1J3qTghUy9MIrMjQa2aXYSC3k= +github.com/rancher/lasso v0.0.0-20240123150939-7055397d6dfa/go.mod h1:utdskbIL7kdVvPCUFPEJQDWJwPHGFpUCRfVkX2G2Xxg= github.com/rancher/norman v0.0.0-20210423002317-8e6ffc77a819/go.mod h1:hhnf77V2lmZD7cvUqi4vTBpIs3KpHNL/AmuN0MqEClI= github.com/rancher/norman v0.0.0-20221205184727-32ef2e185b99 h1:+Oob+DG+SZoX8hnKBWpZfVwxx6K84hMou9nbBOTux7w= github.com/rancher/norman v0.0.0-20221205184727-32ef2e185b99/go.mod h1:zpv7z4ySYL5LlEBKEPf/xf3cjx837/J2i/wHpT43viE= @@ -1945,8 +1943,8 @@ github.com/rancher/wrangler v0.6.2-0.20200820173016-2068de651106/go.mod h1:iKqQc github.com/rancher/wrangler v0.8.3/go.mod h1:dKEaHNB4izxmPUtpq1Hvr3z3Oh+9k5pCZyFO9sUhlaY= github.com/rancher/wrangler v0.8.10/go.mod h1:Lte9WjPtGYxYacIWeiS9qawvu2R4NujFU9xuXWJvc/0= github.com/rancher/wrangler v0.8.11-0.20211214201934-f5aa5d9f2e81/go.mod h1:Lte9WjPtGYxYacIWeiS9qawvu2R4NujFU9xuXWJvc/0= -github.com/rancher/wrangler v1.1.1 h1:wmqUwqc2M7ADfXnBCJTFkTB5ZREWpD78rnZMzmxwMvM= -github.com/rancher/wrangler v1.1.1/go.mod h1:ioVbKupzcBOdzsl55MvEDN0R1wdGggj8iNCYGFI5JvM= +github.com/rancher/wrangler v1.1.2 h1:oXbXo9k7y/H4drUpb4RM1c++vT9O3rpoNEfyusGykiU= +github.com/rancher/wrangler v1.1.2/go.mod h1:2k9MyhlBdjcutcBGoOJSUAz0HgDAXnMjv81d3n/AaQc= github.com/rancher/wrangler-api v0.6.1-0.20200427172631-a7c2f09b783e/go.mod h1:2lcWR98q8HU3U4mVETnXc8quNG0uXxrt8vKd6cAa/30= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1990,8 +1988,8 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.23.7 h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= -github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= +github.com/shirou/gopsutil/v3 v3.24.2 h1:kcR0erMbLg5/3LcInpw0X/rrPSqq4CDPyI6A6ZRC18Y= +github.com/shirou/gopsutil/v3 v3.24.2/go.mod h1:tSg/594BcA+8UdQU2XcW803GWYgdtauFFPgJCJKZlVk= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -2076,7 +2074,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= @@ -2091,8 +2088,8 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -2111,8 +2108,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.13 h1:wsLILXG8qCJNse/qAgLNf23737Cx05GflHg/PJGe1Ok= -github.com/urfave/cli v1.22.13/go.mod h1:VufqObjsMTF2BBwKawpx9R8eAneNEWhoO0yx8Vd+FkE= +github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= +github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= @@ -2140,8 +2137,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE= @@ -2236,8 +2233,9 @@ go.uber.org/atomic v1.11.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -2481,8 +2479,8 @@ golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2642,7 +2640,6 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3037,16 +3034,16 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= +google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod 
h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o= -google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -3097,8 +3094,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -3240,8 +3237,9 @@ k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/api v1.1.1 h1:vt5bOpACArNFIudx1bcE1VeejQdh5wCd7Oz/uFBIkH8= kubevirt.io/api v1.1.1/go.mod h1:CJ4vZsaWhVN3jNbyc9y3lIZhw8nUHbWjap0xHABQiqc= kubevirt.io/client-go v1.1.1 h1:X/fk9kLW65aHRM3GW71UIzXLZsALPoggt4786yLYz1g= diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 0ca1dc4fee5..4d38f3bfcec 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -91,7 +91,7 @@ const ( // UnmarshalText method. See the Unmarshaler example for a demonstration with // email addresses. // -// ### Key mapping +// # Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go struct. // The special `toml` struct tag can be used to map TOML keys to struct fields @@ -248,7 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index c6af3f239dd..b9e309717ea 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -5,17 +5,25 @@ import ( "io" ) +// TextMarshaler is an alias for encoding.TextMarshaler. +// // Deprecated: use encoding.TextMarshaler type TextMarshaler encoding.TextMarshaler +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// // Deprecated: use MetaData.PrimitiveDecode. func PrimitiveDecode(primValue Primitive, v interface{}) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// // Deprecated: use NewDecoder(reader).Decode(&value). func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 930e1d521ac..9cd25d75718 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -136,7 +136,8 @@ func NewEncoder(w io.Writer) *Encoder { // document. func (enc *Encoder) Encode(v interface{}) error { rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { return err } return enc.w.Flush() @@ -457,6 +458,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. 
// @@ -471,17 +482,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } @@ -490,24 +491,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + fieldVal := rv.FieldByIndex(fieldIndex) - if isNil(fieldVal) { /// Don't write anything for nil fields. + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(fieldType.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. continue } + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && enc.isEmpty(fieldVal) { - continue - } if opts.omitzero && isZero(fieldVal) { continue } @@ -649,7 +653,7 @@ func isZero(rv reflect.Value) bool { return false } -func (enc *Encoder) isEmpty(rv reflect.Value) bool { +func isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 @@ -664,13 +668,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool { // type b struct{ s []string } // s := a{field: b{s: []string{"AAA"}}} for i := 0; i < rv.NumField(); i++ { - if !enc.isEmpty(rv.Field(i)) { + if !isEmpty(rv.Field(i)) { return false } } return true case reflect.Bool: return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() } return false } @@ -693,8 +699,11 @@ func (enc *Encoder) newline() { // v v v v vv // key = {k = 1, k2 = 2} func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. if len(key) == 0 { - encPanic(errNoKey) + enc.eElement(val) + return } enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index f4f390e647f..efd68865bba 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -84,7 +84,7 @@ func (pe ParseError) Error() string { pe.Position.Line, pe.LastKey, msg) } -// ErrorWithUsage() returns the error with detailed location context. +// ErrorWithPosition returns the error with detailed location context. // // See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { @@ -124,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string { return b.String() } -// ErrorWithUsage() returns the error with detailed location context and usage +// ErrorWithUsage returns the error with detailed location context and usage // guidance. // // See the documentation on [ParseError]. 
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index d4d70871d8d..3545a6ad66d 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -46,12 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -87,13 +88,14 @@ func (lx *lexer) nextItem() item { } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r) { + if isBareKeyChar(r, lx.tomlNext) { return lexBareName } lx.backup() @@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValue) @@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValueEnd) @@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } return lx.errorf("trailing comma not allowed in inline tables") } return lexInlineTableValue @@ -770,8 +781,8 @@ func lexRawString(lx *lexer) stateFn { } } -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning ''' has already been consumed and +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and // ignored. 
func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -828,6 +839,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn { func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -846,6 +862,11 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': @@ -854,6 +875,19 @@ func lexStringEscape(lx *lexer) stateFn { return lx.error(errLexEscape{r}) } +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { @@ -1225,7 +1259,23 @@ func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHexadecimal(r rune) bool { return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') } -func isBareKeyChar(r rune) bool { + +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 71847a04158..2e78b24e952 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -106,7 +106,7 @@ func (k Key) maybeQuoted(i int) string { return `""` } for _, c := range k[i] { - if !isBareKeyChar(c) { + if !isBareKeyChar(c, false) { return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index d2542d6f926..9c191536986 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "os" "strconv" "strings" "time" @@ -15,6 +16,7 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. + tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. @@ -29,6 +31,8 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -41,9 +45,12 @@ func parse(data string) (p *parser, err error) { }() // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. 
- if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] } // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 @@ -65,9 +72,10 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - lx: lex(data), + lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), + tomlNext: tomlNext, } for { item := p.next() @@ -194,12 +202,12 @@ func (p *parser) topLevel(item item) { for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set value. vItem := p.next() val, typ := p.value(vItem, false) p.set(p.currentKey, val, typ, vItem.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Remove the context we added (preserving any context from [tbl] lines). p.context = outerContext @@ -236,7 +244,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { case itemString: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -331,11 +339,17 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { var dtTypes = []struct { fmt string zone *time.Location + next bool }{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, } func (p *parser) valueDatetime(it item) (interface{}, tomlType) { @@ -346,6 +360,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { err error ) for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { ok = true @@ -384,6 +401,7 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? + _ = types } return array, tomlArray } @@ -426,11 +444,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set the value. val, typ := p.value(p.next(), false) p.set(p.currentKey, val, typ, it.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) hash[p.currentKey] = val /// Restore context. 
@@ -551,7 +569,6 @@ func (p *parser) addContext(key Key, array bool) { func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { p.setValue(key, val) p.setType(key, typ, pos) - } // setValue sets the given key to the given value in the current context. @@ -632,14 +649,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) { // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). -func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } // current returns the full key name of the current context. func (p *parser) current() string { @@ -662,49 +676,54 @@ func stripFirstNewline(s string) string { return s } -// Remove newlines inside triple-quoted strings if a line ends with "\". +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. func (p *parser) stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() } + i += ix - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue } - if escNL { - line = strings.TrimLeft(line, " \t\r") + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } - escNL = !escBS - - if escBS { - split[i] += "\n" + if j == i+1 { + // Not a whitespace escape. + i++ continue } - - if i == len(split)-1 { - p.panicf("invalid escape: '\\ '") - } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) 
+ i++ + continue } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(split, "") } func (p *parser) replaceEscapes(it item, str string) string { @@ -743,12 +762,23 @@ func (p *parser) replaceEscapes(it item, str string) string { case 'r': replaced = append(replaced, rune(0x000D)) r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } case '"': replaced = append(replaced, rune(0x0022)) r += 1 case '\\': replaced = append(replaced, rune(0x005C)) r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } case 'u': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this diff --git a/vendor/github.com/RoaringBitmap/roaring/.drone.yml b/vendor/github.com/RoaringBitmap/roaring/.drone.yml index 698cd0e7a7b..7936bfe8dfd 100644 --- a/vendor/github.com/RoaringBitmap/roaring/.drone.yml +++ b/vendor/github.com/RoaringBitmap/roaring/.drone.yml @@ -11,7 +11,6 @@ steps: commands: - go get -t - go test - - go test -race -run TestConcurrent* - go build -tags appengine - go test -tags appengine - GOARCH=386 go build diff --git a/vendor/github.com/RoaringBitmap/roaring/Makefile b/vendor/github.com/RoaringBitmap/roaring/Makefile deleted file mode 100644 index 0a4f9f0aae5..00000000000 --- a/vendor/github.com/RoaringBitmap/roaring/Makefile +++ /dev/null @@ -1,107 +0,0 @@ -.PHONY: help all test format fmtcheck vet lint qa deps clean nuke ser fetch-real-roaring-datasets - - - - - - - - -# Display general help about this command -help: - @echo "" - @echo "The following commands are available:" - @echo "" - @echo " make qa : Run all the tests" - @echo " make test : Run the unit tests" - @echo "" - @echo " make format : Format the source code" - @echo " make fmtcheck : Check if the source code has been formatted" - @echo " make vet : Check for suspicious constructs" - @echo " make lint : Check for style errors" - @echo "" - @echo " make deps : Get the dependencies" - @echo " make clean : Remove any build artifact" - @echo " make nuke : Deletes any intermediate file" - @echo "" - @echo " make fuzz-smat : Fuzzy testing with smat" - @echo " make fuzz-stream : Fuzzy testing with stream deserialization" - @echo " make fuzz-buffer : Fuzzy testing with buffer deserialization" - @echo "" - -# Alias for help target -all: help -test: - go test - go test -race -run TestConcurrent* -# Format the source code -format: - @find ./ -type f -name "*.go" -exec gofmt -w {} \; - -# Check if the source code has been formatted -fmtcheck: - @mkdir -p target - @find ./ -type f -name "*.go" -exec gofmt -d {} \; | tee target/format.diff - @test ! -s target/format.diff || { echo "ERROR: the source code has not been formatted - please use 'make format' or 'gofmt'"; exit 1; } - -# Check for syntax errors -vet: - GOPATH=$(GOPATH) go vet ./... - -# Check for style errors -lint: - GOPATH=$(GOPATH) PATH=$(GOPATH)/bin:$(PATH) golint ./... 
- - - - - -# Alias to run all quality-assurance checks -qa: fmtcheck test vet lint - -# --- INSTALL --- - -# Get the dependencies -deps: - GOPATH=$(GOPATH) go get github.com/stretchr/testify - GOPATH=$(GOPATH) go get github.com/bits-and-blooms/bitset - GOPATH=$(GOPATH) go get github.com/golang/lint/golint - GOPATH=$(GOPATH) go get github.com/mschoch/smat - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz - GOPATH=$(GOPATH) go get github.com/dvyukov/go-fuzz/go-fuzz-build - GOPATH=$(GOPATH) go get github.com/glycerine/go-unsnap-stream - GOPATH=$(GOPATH) go get github.com/philhofer/fwd - GOPATH=$(GOPATH) go get github.com/jtolds/gls - -fuzz-smat: - go test -tags=gofuzz -run=TestGenerateSmatCorpus - go-fuzz-build -func FuzzSmat github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-stream: - go-fuzz-build -func FuzzSerializationStream github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - - -fuzz-buffer: - go-fuzz-build -func FuzzSerializationBuffer github.com/RoaringBitmap/roaring - go-fuzz -bin=./roaring-fuzz.zip -workdir=workdir/ -timeout=200 - -# Remove any build artifact -clean: - GOPATH=$(GOPATH) go clean ./... - -# Deletes any intermediate file -nuke: - rm -rf ./target - GOPATH=$(GOPATH) go clean -i ./... - -cover: - go test -coverprofile=coverage.out - go tool cover -html=coverage.out - -fetch-real-roaring-datasets: - # pull github.com/RoaringBitmap/real-roaring-datasets -> testdata/real-roaring-datasets - git submodule init - git submodule update diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md index 753b8068b41..acd3058b776 100644 --- a/vendor/github.com/RoaringBitmap/roaring/README.md +++ b/vendor/github.com/RoaringBitmap/roaring/README.md @@ -1,5 +1,7 @@ -roaring [![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring/roaring64) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) -[![Build Status](https://cloud.drone.io/api/badges/RoaringBitmap/roaring/status.svg)](https://cloud.drone.io/RoaringBitmap/roaring) +# roaring + +[![GoDoc](https://godoc.org/github.com/RoaringBitmap/roaring?status.svg)](https://godoc.org/github.com/RoaringBitmap/roaring) [![Go Report Card](https://goreportcard.com/badge/RoaringBitmap/roaring)](https://goreportcard.com/report/github.com/RoaringBitmap/roaring) + ![Go-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-CI/badge.svg) ![Go-ARM-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-ARM-CI/badge.svg) ![Go-Windows-CI](https://github.com/RoaringBitmap/roaring/workflows/Go-Windows-CI/badge.svg) @@ -31,17 +33,17 @@ Roaring bitmaps are found to work well in many important applications: The ``roaring`` Go library is used by * [anacrolix/torrent] -* [runv](https://github.com/hyperhq/runv) * [InfluxDB](https://www.influxdata.com) * [Pilosa](https://www.pilosa.com/) * [Bleve](http://www.blevesearch.com) +* [Weaviate](https://github.com/weaviate/weaviate) * [lindb](https://github.com/lindb/lindb) * [Elasticell](https://github.com/deepfabric/elasticell) * [SourceGraph](https://github.com/sourcegraph/sourcegraph) * [M3](https://github.com/m3db/m3) * [trident](https://github.com/NetApp/trident) * [Husky](https://www.datadoghq.com/blog/engineering/introducing-husky/) - +* 
[FrostDB](https://github.com/polarsignals/frostdb) This library is used in production in several systems, it is part of the [Awesome Go collection](https://awesome-go.com). @@ -99,7 +101,7 @@ whether you like it or not. That can become very wasteful. This being said, there are definitively cases where attempting to use compressed bitmaps is wasteful. For example, if you have a small universe size. E.g., your bitmaps represent sets of integers -from [0,n) where n is small (e.g., n=64 or n=128). If you are able to uncompressed BitSet and +from [0,n) where n is small (e.g., n=64 or n=128). If you can use uncompressed BitSet and it does not blow up your memory usage, then compressed bitmaps are probably not useful to you. In fact, if you do not need compression, then a BitSet offers remarkable speed. @@ -134,7 +136,7 @@ There is a big problem with these formats however that can hurt you badly in som Roaring solves this problem. It works in the following manner. It divides the data into chunks of 216 integers (e.g., [0, 216), [216, 2 x 216), ...). Within a chunk, it can use an uncompressed bitmap, a simple list of integers, -or a list of runs. Whatever format it uses, they all allow you to check for the present of any one value quickly +or a list of runs. Whatever format it uses, they all allow you to check for the presence of any one value quickly (e.g., with a binary search). The net result is that Roaring can compute many operations much faster than run-length-encoded formats like WAH, EWAH, Concise... Maybe surprisingly, Roaring also generally offers better compression ratios. diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go index 9541fd53693..80fa676ef64 100644 --- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go @@ -17,8 +17,17 @@ func (ac *arrayContainer) String() string { } func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uint32) int { + if i < 0 { + panic("negative index") + } + if len(ac.content) == 0 { + return i + } + _ = x[len(ac.content)-1+i] + _ = ac.content[len(ac.content)-1] for k := 0; k < len(ac.content); k++ { - x[k+i] = uint32(ac.content[k]) | mask + x[k+i] = + uint32(ac.content[k]) | mask } return i + len(ac.content) } @@ -655,10 +664,54 @@ func (ac *arrayContainer) iandNot(a container) container { } func (ac *arrayContainer) iandNotRun16(rc *runContainer16) container { - rcb := rc.toBitmapContainer() - acb := ac.toBitmapContainer() - acb.iandNotBitmapSurely(rcb) - *ac = *(acb.toArrayContainer()) + // Fast path: if either the array container or the run container is empty, the result is the array. + if ac.isEmpty() || rc.isEmpty() { + // Empty + return ac + } + // Fast path: if the run container is full, the result is empty. + if rc.isFull() { + ac.content = ac.content[:0] + return ac + } + current_run := 0 + // All values in [start_run, end_end] are part of the run + start_run := rc.iv[current_run].start + end_end := start_run + rc.iv[current_run].length + // We are going to read values in the array at index i, and we are + // going to write them at index pos. So we do in-place processing. + // We always have that pos <= i by construction. So we can either + // overwrite a value just read, or a value that was previous read. 
+ pos := 0 + i := 0 + for ; i < len(ac.content); i++ { + if ac.content[i] < start_run { + // the value in the array appears before the run [start_run, end_end] + ac.content[pos] = ac.content[i] + pos++ + } else if ac.content[i] <= end_end { + // nothing to do, the value is in the array but also in the run. + } else { + // We have the value in the array after the run. We cannot tell + // whether we need to keep it or not. So let us move to another run. + if current_run+1 < len(rc.iv) { + current_run++ + start_run = rc.iv[current_run].start + end_end = start_run + rc.iv[current_run].length + i-- // retry with the same i + } else { + // We have exhausted the number of runs. We can keep the rest of the values + // from i to len(ac.content) - 1 inclusively. + break // We are done, the rest of the array will be kept + } + } + } + for ; i < len(ac.content); i++ { + ac.content[pos] = ac.content[i] + pos++ + } + // We 'shink' the slice. + ac.content = ac.content[:pos] return ac } diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go index 71029f4ff76..35e68438167 100644 --- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go @@ -1062,7 +1062,6 @@ func (bc *bitmapContainer) PrevSetBit(i int) int { // reference the java implementation // https://github.com/RoaringBitmap/RoaringBitmap/blob/master/src/main/java/org/roaringbitmap/BitmapContainer.java#L875-L892 -// func (bc *bitmapContainer) numberOfRuns() int { if bc.cardinality == 0 { return 0 diff --git a/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go index 3e5490a9dfc..d5ebb91ab7b 100644 --- a/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go +++ b/vendor/github.com/RoaringBitmap/roaring/internal/byte_input.go @@ -10,6 +10,11 @@ type ByteInput interface { // Next returns a slice containing the next n bytes from the buffer, // advancing the buffer as if the bytes had been returned by Read. Next(n int) ([]byte, error) + // NextReturnsSafeSlice returns true if Next() returns a safe slice as opposed + // to a slice that points to an underlying buffer possibly owned by another system. + // When NextReturnsSafeSlice returns false, the result from Next() should be copied + // before it is modified (i.e., it is immutable). + NextReturnsSafeSlice() bool // ReadUInt32 reads uint32 with LittleEndian order ReadUInt32() (uint32, error) // ReadUInt16 reads uint16 with LittleEndian order @@ -42,6 +47,25 @@ type ByteBuffer struct { off int } +// NewByteBuffer creates a new ByteBuffer. +func NewByteBuffer(buf []byte) *ByteBuffer { + return &ByteBuffer{ + buf: buf, + } +} + +var _ io.Reader = (*ByteBuffer)(nil) + +// Read implements io.Reader. +func (b *ByteBuffer) Read(p []byte) (int, error) { + data, err := b.Next(len(p)) + if err != nil { + return 0, err + } + copy(p, data) + return len(data), nil +} + // Next returns a slice containing the next n bytes from the reader // If there are fewer bytes than the given n, io.ErrUnexpectedEOF will be returned func (b *ByteBuffer) Next(n int) ([]byte, error) { @@ -57,6 +81,12 @@ func (b *ByteBuffer) Next(n int) ([]byte, error) { return data, nil } +// NextReturnsSafeSlice returns false since ByteBuffer might hold +// an array owned by some other systems. 
+func (b *ByteBuffer) NextReturnsSafeSlice() bool { + return false +} + // ReadUInt32 reads uint32 with LittleEndian order func (b *ByteBuffer) ReadUInt32() (uint32, error) { if len(b.buf)-b.off < 4 { @@ -109,26 +139,45 @@ func (b *ByteBuffer) Reset(buf []byte) { type ByteInputAdapter struct { r io.Reader readBytes int + buf [4]byte +} + +var _ io.Reader = (*ByteInputAdapter)(nil) + +// Read implements io.Reader. +func (b *ByteInputAdapter) Read(buf []byte) (int, error) { + m, err := io.ReadAtLeast(b.r, buf, len(buf)) + b.readBytes += m + + if err != nil { + return 0, err + } + + return m, nil } // Next returns a slice containing the next n bytes from the buffer, // advancing the buffer as if the bytes had been returned by Read. func (b *ByteInputAdapter) Next(n int) ([]byte, error) { buf := make([]byte, n) - m, err := io.ReadAtLeast(b.r, buf, n) - b.readBytes += m + _, err := b.Read(buf) if err != nil { return nil, err } - return buf, nil } +// NextReturnsSafeSlice returns true since ByteInputAdapter always returns a slice +// allocated with make([]byte, ...) +func (b *ByteInputAdapter) NextReturnsSafeSlice() bool { + return true +} + // ReadUInt32 reads uint32 with LittleEndian order func (b *ByteInputAdapter) ReadUInt32() (uint32, error) { - buf, err := b.Next(4) - + buf := b.buf[:4] + _, err := b.Read(buf) if err != nil { return 0, err } @@ -138,8 +187,8 @@ func (b *ByteInputAdapter) ReadUInt32() (uint32, error) { // ReadUInt16 reads uint16 with LittleEndian order func (b *ByteInputAdapter) ReadUInt16() (uint16, error) { - buf, err := b.Next(2) - + buf := b.buf[:2] + _, err := b.Read(buf) if err != nil { return 0, err } diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go index 7220da272c0..a31cdbd9e8a 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaring.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go @@ -13,6 +13,7 @@ import ( "strconv" "github.com/RoaringBitmap/roaring/internal" + "github.com/bits-and-blooms/bitset" ) // Bitmap represents a compressed bitmap where you can add integers. @@ -53,17 +54,186 @@ func (rb *Bitmap) ToBytes() ([]byte, error) { return rb.highlowcontainer.toBytes() } +const wordSize = uint64(64) +const log2WordSize = uint64(6) +const capacity = ^uint64(0) +const bitmapContainerSize = (1 << 16) / 64 // bitmap size in words + +// DenseSize returns the size of the bitmap when stored as a dense bitmap. +func (rb *Bitmap) DenseSize() uint64 { + if rb.highlowcontainer.size() == 0 { + return 0 + } + + maximum := 1 + uint64(rb.Maximum()) + if maximum > (capacity - wordSize + 1) { + return uint64(capacity >> log2WordSize) + } + + return uint64((maximum + (wordSize - 1)) >> log2WordSize) +} + +// ToDense returns a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert a roaring bitmap to a format that can be used by other libraries +// like https://github.com/bits-and-blooms/bitset or https://github.com/kelindar/bitmap +func (rb *Bitmap) ToDense() []uint64 { + sz := rb.DenseSize() + if sz == 0 { + return nil + } + + bitmap := make([]uint64, sz) + rb.WriteDenseTo(bitmap) + return bitmap +} + +// FromDense creates a bitmap from a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert bitmaps from libraries like https://github.com/bits-and-blooms/bitset or +// https://github.com/kelindar/bitmap into roaring bitmaps fast and with convenience. +// +// This function will not create any run containers, only array and bitmap containers. 
It's up to +// the caller to call RunOptimize if they want to further compress the runs of consecutive values. +// +// When doCopy is true, the bitmap is copied into a new slice for each bitmap container. +// This is useful when the bitmap is going to be modified after this function returns or if it's +// undesirable to hold references to large bitmaps which the GC would not be able to collect. +// One copy can still happen even when doCopy is false if the bitmap length is not divisible +// by bitmapContainerSize. +// +// See also FromBitSet. +func FromDense(bitmap []uint64, doCopy bool) *Bitmap { + sz := (len(bitmap) + bitmapContainerSize - 1) / bitmapContainerSize // round up + rb := &Bitmap{ + highlowcontainer: roaringArray{ + containers: make([]container, 0, sz), + keys: make([]uint16, 0, sz), + needCopyOnWrite: make([]bool, 0, sz), + }, + } + rb.FromDense(bitmap, doCopy) + return rb +} + +// FromDense unmarshalls from a slice of uint64s representing the bitmap as a dense bitmap. +// Useful to convert bitmaps from libraries like https://github.com/bits-and-blooms/bitset or +// https://github.com/kelindar/bitmap into roaring bitmaps fast and with convenience. +// Callers are responsible for ensuring that the bitmap is empty before calling this function. +// +// This function will not create any run containers, only array and bitmap containers. It is up to +// the caller to call RunOptimize if they want to further compress the runs of consecutive values. +// +// When doCopy is true, the bitmap is copied into a new slice for each bitmap container. +// This is useful when the bitmap is going to be modified after this function returns or if it's +// undesirable to hold references to large bitmaps which the GC would not be able to collect. +// One copy can still happen even when doCopy is false if the bitmap length is not divisible +// by bitmapContainerSize. +// +// See FromBitSet. +func (rb *Bitmap) FromDense(bitmap []uint64, doCopy bool) { + if len(bitmap) == 0 { + return + } + + var k uint16 + const size = bitmapContainerSize + + for len(bitmap) > 0 { + hi := size + if len(bitmap) < size { + hi = len(bitmap) + } + + words := bitmap[:hi] + count := int(popcntSlice(words)) + + switch { + case count > arrayDefaultMaxSize: + c := &bitmapContainer{cardinality: count, bitmap: words} + cow := true + + if doCopy || len(words) < size { + c.bitmap = make([]uint64, size) + copy(c.bitmap, words) + cow = false + } + + rb.highlowcontainer.appendContainer(k, c, cow) + + case count > 0: + c := &arrayContainer{content: make([]uint16, count)} + var pos, base int + for _, w := range words { + for w != 0 { + t := w & -w + c.content[pos] = uint16(base + int(popcount(t-1))) + pos++ + w ^= t + } + base += 64 + } + rb.highlowcontainer.appendContainer(k, c, false) + } + + bitmap = bitmap[hi:] + k++ + } +} + +// WriteDenseTo writes to a slice of uint64s representing the bitmap as a dense bitmap. +// Callers are responsible for allocating enough space in the bitmap using DenseSize. 
+// Useful to convert a roaring bitmap to a format that can be used by other libraries +// like https://github.com/bits-and-blooms/bitset or https://github.com/kelindar/bitmap +func (rb *Bitmap) WriteDenseTo(bitmap []uint64) { + for i, ct := range rb.highlowcontainer.containers { + hb := uint32(rb.highlowcontainer.keys[i]) << 16 + + switch c := ct.(type) { + case *arrayContainer: + for _, x := range c.content { + n := int(hb | uint32(x)) + bitmap[n>>log2WordSize] |= uint64(1) << uint(x%64) + } + + case *bitmapContainer: + copy(bitmap[int(hb)>>log2WordSize:], c.bitmap) + + case *runContainer16: + for j := range c.iv { + start := uint32(c.iv[j].start) + end := start + uint32(c.iv[j].length) + 1 + lo := int(hb|start) >> log2WordSize + hi := int(hb|(end-1)) >> log2WordSize + + if lo == hi { + bitmap[lo] |= (^uint64(0) << uint(start%64)) & + (^uint64(0) >> (uint(-end) % 64)) + continue + } + + bitmap[lo] |= ^uint64(0) << uint(start%64) + for n := lo + 1; n < hi; n++ { + bitmap[n] = ^uint64(0) + } + bitmap[hi] |= ^uint64(0) >> (uint(-end) % 64) + } + default: + panic("unsupported container type") + } + } +} + // Checksum computes a hash (currently FNV-1a) for a bitmap that is suitable for // using bitmaps as elements in hash sets or as keys in hash maps, as well as // generally quicker comparisons. // The implementation is biased towards efficiency in little endian machines, so // expect some extra CPU cycles and memory to be used if your machine is big endian. -// Likewise, don't use this to verify integrity unless you're certain you'll load -// the bitmap on a machine with the same endianess used to create it. +// Likewise, do not use this to verify integrity unless you are certain you will load +// the bitmap on a machine with the same endianess used to create it. (Thankfully +// very few people use big endian machines these days.) func (rb *Bitmap) Checksum() uint64 { const ( offset = 14695981039346656037 - prime = 1099511628211 + prime = 1099511628211 ) var bytes []byte @@ -106,6 +276,20 @@ func (rb *Bitmap) Checksum() uint64 { return hash } +// FromUnsafeBytes reads a serialized version of this bitmap from the byte buffer without copy. +// It is the caller's responsibility to ensure that the input data is not modified and remains valid for the entire lifetime of this bitmap. +// This method avoids small allocations but holds references to the input data buffer. It is GC-friendly, but it may consume more memory eventually. +// The containers in the resulting bitmap are immutable containers tied to the provided byte array and they rely on +// copy-on-write which means that modifying them creates copies. Thus FromUnsafeBytes is more likely to be appropriate for read-only use cases, +// when the resulting bitmap can be considered immutable. +// +// See also the FromBuffer function. +// See https://github.com/RoaringBitmap/roaring/pull/395 for more details. +func (rb *Bitmap) FromUnsafeBytes(data []byte, cookieHeader ...byte) (p int64, err error) { + stream := internal.NewByteBuffer(data) + return rb.ReadFrom(stream) +} + // ReadFrom reads a serialized version of this bitmap from stream. // The format is compatible with other RoaringBitmap // implementations (Java, C) and is documented here: @@ -114,12 +298,18 @@ func (rb *Bitmap) Checksum() uint64 { // So add cookieHeader to accept the 4-byte data that has been read in roaring64.ReadFrom. // It is not necessary to pass cookieHeader when call roaring.ReadFrom to read the roaring32 data directly. 
func (rb *Bitmap) ReadFrom(reader io.Reader, cookieHeader ...byte) (p int64, err error) { - stream := internal.ByteInputAdapterPool.Get().(*internal.ByteInputAdapter) - stream.Reset(reader) + stream, ok := reader.(internal.ByteInput) + if !ok { + byteInputAdapter := internal.ByteInputAdapterPool.Get().(*internal.ByteInputAdapter) + byteInputAdapter.Reset(reader) + stream = byteInputAdapter + } p, err = rb.highlowcontainer.readFrom(stream, cookieHeader...) - internal.ByteInputAdapterPool.Put(stream) + if !ok { + internal.ByteInputAdapterPool.Put(stream.(*internal.ByteInputAdapter)) + } return } @@ -139,12 +329,17 @@ func (rb *Bitmap) ReadFrom(reader io.Reader, cookieHeader ...byte) (p int64, err // You should *not* change the copy-on-write status of the resulting // bitmaps (SetCopyOnWrite). // +// Thus FromBuffer is more likely to be appropriate for read-only use cases, +// when the resulting bitmap can be considered immutable. +// // If buf becomes unavailable, then a bitmap created with // FromBuffer would be effectively broken. Furthermore, any // bitmap derived from this bitmap (e.g., via Or, And) might // also be broken. Thus, before making buf unavailable, you should // call CloneCopyOnWriteContainers on all such bitmaps. // +// See also the FromUnsafeBytes function which can have better performance +// in some cases. func (rb *Bitmap) FromBuffer(buf []byte) (p int64, err error) { stream := internal.ByteBufferPool.Get().(*internal.ByteBuffer) stream.Reset(buf) @@ -194,6 +389,16 @@ func (rb *Bitmap) Clear() { rb.highlowcontainer.clear() } +// ToBitSet copies the content of the RoaringBitmap into a bitset.BitSet instance +func (rb *Bitmap) ToBitSet() *bitset.BitSet { + return bitset.From(rb.ToDense()) +} + +// FromBitSet creates a new RoaringBitmap from a bitset.BitSet instance +func FromBitSet(bitset *bitset.BitSet) *Bitmap { + return FromDense(bitset.Bytes(), false) +} + // ToArray creates a new slice containing all of the integers stored in the Bitmap in sorted order func (rb *Bitmap) ToArray() []uint32 { array := make([]uint32, rb.GetCardinality()) @@ -233,7 +438,7 @@ func BoundSerializedSizeInBytes(cardinality uint64, universeSize uint64) uint64 contnbr := (universeSize + uint64(65535)) / uint64(65536) if contnbr > cardinality { contnbr = cardinality - // we can't have more containers than we have values + // we cannot have more containers than we have values } headermax := 8*contnbr + 4 if 4 > (contnbr+7)/8 { @@ -276,9 +481,9 @@ type intIterator struct { // This way, instead of making up-to 64k allocations per full iteration // we get a single allocation and simply reinitialize the appropriate // iterator and point to it in the generic `iter` member on each key bound. - shortIter shortIterator - runIter runIterator16 - bitmapIter bitmapContainerShortIterator + shortIter shortIterator + runIter runIterator16 + bitmapIter bitmapContainerShortIterator } // HasNext returns true if there are more integers to iterate over @@ -341,14 +546,13 @@ func (ii *intIterator) AdvanceIfNeeded(minval uint32) { // IntIterator is meant to allow you to iterate through the values of a bitmap, see Initialize(a *Bitmap) type IntIterator = intIterator - // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). 
-func (p *intIterator) Initialize(a *Bitmap) { - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() +func (ii *intIterator) Initialize(a *Bitmap) { + ii.pos = 0 + ii.highlowcontainer = &a.highlowcontainer + ii.init() } type intReverseIterator struct { @@ -357,9 +561,9 @@ type intReverseIterator struct { iter shortIterable highlowcontainer *roaringArray - shortIter reverseIterator - runIter runReverseIterator16 - bitmapIter reverseBitmapContainerShortIterator + shortIter reverseIterator + runIter runReverseIterator16 + bitmapIter reverseBitmapContainerShortIterator } // HasNext returns true if there are more integers to iterate over @@ -414,10 +618,10 @@ type IntReverseIterator = intReverseIterator // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). -func (p *intReverseIterator) Initialize(a *Bitmap) { - p.highlowcontainer = &a.highlowcontainer - p.pos = a.highlowcontainer.size() - 1 - p.init() +func (ii *intReverseIterator) Initialize(a *Bitmap) { + ii.highlowcontainer = &a.highlowcontainer + ii.pos = a.highlowcontainer.size() - 1 + ii.init() } // ManyIntIterable allows you to iterate over the values in a Bitmap @@ -434,9 +638,9 @@ type manyIntIterator struct { iter manyIterable highlowcontainer *roaringArray - shortIter shortIterator - runIter runIterator16 - bitmapIter bitmapContainerManyIterator + shortIter shortIterator + runIter runIterator16 + bitmapIter bitmapContainerManyIterator } func (ii *manyIntIterator) init() { @@ -495,17 +699,16 @@ func (ii *manyIntIterator) NextMany64(hs64 uint64, buf []uint64) int { return n } - // ManyIntIterator is meant to allow you to iterate through the values of a bitmap, see Initialize(a *Bitmap) type ManyIntIterator = manyIntIterator // Initialize configures the existing iterator so that it can iterate through the values of // the provided bitmap. // The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove). -func (p *manyIntIterator) Initialize(a *Bitmap) { - p.pos = 0 - p.highlowcontainer = &a.highlowcontainer - p.init() +func (ii *manyIntIterator) Initialize(a *Bitmap) { + ii.pos = 0 + ii.highlowcontainer = &a.highlowcontainer + ii.init() } // String creates a string representation of the Bitmap @@ -569,7 +772,7 @@ func (rb *Bitmap) Iterate(cb func(x uint32) bool) { // Iterator creates a new IntPeekable to iterate over the integers contained in the bitmap, in sorted order; // the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove). 
func (rb *Bitmap) Iterator() IntPeekable { - p := new(intIterator) + p := new(intIterator) p.Initialize(rb) return p } @@ -847,7 +1050,7 @@ func (rb *Bitmap) Select(x uint32) (uint32, error) { return uint32(key)<<16 + uint32(c.selectInt(uint16(remaining))), nil } } - return 0, fmt.Errorf("can't find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) + return 0, fmt.Errorf("cannot find %dth integer in a bitmap with only %d items", x, rb.GetCardinality()) } // And computes the intersection between two bitmaps and stores the result in the current bitmap diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go index eeb3d313159..079195ddaf1 100644 --- a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go +++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go @@ -4,8 +4,9 @@ import ( "bytes" "encoding/binary" "fmt" - "github.com/RoaringBitmap/roaring/internal" "io" + + "github.com/RoaringBitmap/roaring/internal" ) type container interface { @@ -112,9 +113,10 @@ func newRoaringArray() *roaringArray { // runOptimize compresses the element containers to minimize space consumed. // Q: how does this interact with copyOnWrite and needCopyOnWrite? // A: since we aren't changing the logical content, just the representation, -// we don't bother to check the needCopyOnWrite bits. We replace -// (possibly all) elements of ra.containers in-place with space -// optimized versions. +// +// we don't bother to check the needCopyOnWrite bits. We replace +// (possibly all) elements of ra.containers in-place with space +// optimized versions. func (ra *roaringArray) runOptimize() { for i := range ra.containers { ra.containers[i] = ra.containers[i].toEfficientContainer() @@ -465,9 +467,7 @@ func (ra *roaringArray) serializedSizeInBytes() uint64 { return answer } -// // spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) { hasRun := ra.hasRunCompression() isRunSizeInBytes := 0 @@ -544,15 +544,14 @@ func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) { return n, nil } -// // spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// func (ra *roaringArray) toBytes() ([]byte, error) { var buf bytes.Buffer _, err := ra.writeTo(&buf) return buf.Bytes(), err } +// Reads a serialized roaringArray from a byte slice. 
func (ra *roaringArray) readFrom(stream internal.ByteInput, cookieHeader ...byte) (int64, error) { var cookie uint32 var err error @@ -567,6 +566,8 @@ func (ra *roaringArray) readFrom(stream internal.ByteInput, cookieHeader ...byte return stream.GetReadBytes(), fmt.Errorf("error in roaringArray.readFrom: could not read initial cookie: %s", err) } } + // If NextReturnsSafeSlice is false, then willNeedCopyOnWrite should be true + willNeedCopyOnWrite := !stream.NextReturnsSafeSlice() var size uint32 var isRunBitmap []byte @@ -631,7 +632,7 @@ func (ra *roaringArray) readFrom(stream internal.ByteInput, cookieHeader ...byte key := keycard[2*i] card := int(keycard[2*i+1]) + 1 ra.keys[i] = key - ra.needCopyOnWrite[i] = true + ra.needCopyOnWrite[i] = willNeedCopyOnWrite if isRunBitmap != nil && isRunBitmap[i/8]&(1<<(i%8)) != 0 { // run container diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go index 4ce48a294ca..7098ba28fca 100644 --- a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go +++ b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go @@ -47,6 +47,7 @@ import ( // runContainer16 does run-length encoding of sets of // uint16 integers. type runContainer16 struct { + // iv is a slice of sorted, non-overlapping, non-adjacent intervals. iv []interval16 } @@ -253,10 +254,8 @@ func newRunContainer16FromBitmapContainer(bc *bitmapContainer) *runContainer16 { } -// // newRunContainer16FromArray populates a new // runContainer16 from the contents of arr. -// func newRunContainer16FromArray(arr *arrayContainer) *runContainer16 { // keep this in sync with newRunContainer16FromVals above @@ -834,24 +833,23 @@ func (rc *runContainer16) numIntervals() int { // If key is not already present, then whichInterval16 is // set as follows: // -// a) whichInterval16 == len(rc.iv)-1 if key is beyond our -// last interval16 in rc.iv; +// a) whichInterval16 == len(rc.iv)-1 if key is beyond our +// last interval16 in rc.iv; // -// b) whichInterval16 == -1 if key is before our first -// interval16 in rc.iv; +// b) whichInterval16 == -1 if key is before our first +// interval16 in rc.iv; // -// c) whichInterval16 is set to the minimum index of rc.iv -// which comes strictly before the key; -// so rc.iv[whichInterval16].last < key, -// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start -// (Note that whichInterval16+1 won't exist when -// whichInterval16 is the last interval.) +// c) whichInterval16 is set to the minimum index of rc.iv +// which comes strictly before the key; +// so rc.iv[whichInterval16].last < key, +// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start +// (Note that whichInterval16+1 won't exist when +// whichInterval16 is the last interval.) // // runContainer16.search always returns whichInterval16 < len(rc.iv). // // The search space is from startIndex to endxIndex. If endxIndex is set to zero, then there // no upper bound. 
-// func (rc *runContainer16) searchRange(key int, startIndex int, endxIndex int) (whichInterval16 int, alreadyPresent bool, numCompares int) { n := int(len(rc.iv)) if n == 0 { @@ -937,21 +935,20 @@ func (rc *runContainer16) searchRange(key int, startIndex int, endxIndex int) (w // If key is not already present, then whichInterval16 is // set as follows: // -// a) whichInterval16 == len(rc.iv)-1 if key is beyond our -// last interval16 in rc.iv; +// a) whichInterval16 == len(rc.iv)-1 if key is beyond our +// last interval16 in rc.iv; // -// b) whichInterval16 == -1 if key is before our first -// interval16 in rc.iv; +// b) whichInterval16 == -1 if key is before our first +// interval16 in rc.iv; // -// c) whichInterval16 is set to the minimum index of rc.iv -// which comes strictly before the key; -// so rc.iv[whichInterval16].last < key, -// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start -// (Note that whichInterval16+1 won't exist when -// whichInterval16 is the last interval.) +// c) whichInterval16 is set to the minimum index of rc.iv +// which comes strictly before the key; +// so rc.iv[whichInterval16].last < key, +// and if whichInterval16+1 exists, then key < rc.iv[whichInterval16+1].start +// (Note that whichInterval16+1 won't exist when +// whichInterval16 is the last interval.) // // runContainer16.search always returns whichInterval16 < len(rc.iv). -// func (rc *runContainer16) search(key int) (whichInterval16 int, alreadyPresent bool, numCompares int) { return rc.searchRange(key, 0, 0) } @@ -994,7 +991,6 @@ func newRunContainer16() *runContainer16 { // newRunContainer16CopyIv creates a run container, initializing // with a copy of the supplied iv slice. -// func newRunContainer16CopyIv(iv []interval16) *runContainer16 { rc := &runContainer16{ iv: make([]interval16, len(iv)), @@ -1011,7 +1007,6 @@ func (rc *runContainer16) Clone() *runContainer16 { // newRunContainer16TakeOwnership returns a new runContainer16 // backed by the provided iv slice, which we will // assume exclusive control over from now on. -// func newRunContainer16TakeOwnership(iv []interval16) *runContainer16 { rc := &runContainer16{ iv: iv, @@ -2006,7 +2001,6 @@ func (rc *runContainer16) not(firstOfRange, endx int) container { // Current routine is correct but // makes 2 more passes through the arrays than should be // strictly necessary. Measure both ways though--this may not matter. -// func (rc *runContainer16) Not(firstOfRange, endx int) *runContainer16 { if firstOfRange > endx { @@ -2329,7 +2323,6 @@ func runArrayUnionToRuns(rc *runContainer16, ac *arrayContainer) ([]interval16, // the backing array, and then you write // the answer at the beginning. What this // trick does is minimize memory allocations. 
-// func (rc *runContainer16) lazyIOR(a container) container { // not lazy at the moment return rc.ior(a) diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization.go b/vendor/github.com/RoaringBitmap/roaring/serialization.go index 70e3bbcc57f..dbfecc846d4 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization.go @@ -7,7 +7,6 @@ import ( // writeTo for runContainer16 follows this // spec: https://github.com/RoaringBitmap/RoaringFormatSpec -// func (b *runContainer16) writeTo(stream io.Writer) (int, error) { buf := make([]byte, 2+4*len(b.iv)) binary.LittleEndian.PutUint16(buf[0:], uint16(len(b.iv))) diff --git a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go index 2e4ea595439..6e3a5d554ce 100644 --- a/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go +++ b/vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go @@ -79,12 +79,12 @@ func (bc *bitmapContainer) asLittleEndianByteSlice() []byte { // Deserialization code follows -//// +// // // These methods (byteSliceAsUint16Slice,...) do not make copies, // they are pointer-based (unsafe). The caller is responsible to // ensure that the input slice does not get garbage collected, deleted // or modified while you hold the returned slince. -//// +// // func byteSliceAsUint16Slice(slice []byte) (result []uint16) { // here we create a new slice holder if len(slice)%2 != 0 { panic("Slice size should be divisible by 2") @@ -295,7 +295,6 @@ func byteSliceAsBoolSlice(slice []byte) (result []bool) { // bitmap derived from this bitmap (e.g., via Or, And) might // also be broken. Thus, before making buf unavailable, you should // call CloneCopyOnWriteContainers on all such bitmaps. -// func (rb *Bitmap) FrozenView(buf []byte) error { return rb.highlowcontainer.frozenView(buf) } @@ -313,7 +312,7 @@ func (rb *Bitmap) FrozenView(buf []byte) error { * uint8_t[num_containers] *
<header> uint32_t * - * <header> is a 4-byte value which is a bit union of FROZEN_COOKIE (15 bits) + * <header> is a 4-byte value which is a bit union of frozenCookie (15 bits) * and the number of containers (17 bits). * * <counts> stores number of elements for every container. @@ -329,43 +328,50 @@ func (rb *Bitmap) FrozenView(buf []byte) error { * All members have their native alignments during deserialization except <header>
, * which is not guaranteed to be aligned by 4 bytes. */ -const FROZEN_COOKIE = 13766 +const frozenCookie = 13766 var ( - FrozenBitmapInvalidCookie = errors.New("header does not contain the FROZEN_COOKIE") - FrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") - FrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") - FrozenBitmapOverpopulated = errors.New("too many containers") - FrozenBitmapUnexpectedData = errors.New("spurious data in input") - FrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") - FrozenBitmapBufferTooSmall = errors.New("buffer too small") + // ErrFrozenBitmapInvalidCookie is returned when the header does not contain the frozenCookie. + ErrFrozenBitmapInvalidCookie = errors.New("header does not contain the frozenCookie") + // ErrFrozenBitmapBigEndian is returned when the header is big endian. + ErrFrozenBitmapBigEndian = errors.New("loading big endian frozen bitmaps is not supported") + // ErrFrozenBitmapIncomplete is returned when the buffer is too small to contain a frozen bitmap. + ErrFrozenBitmapIncomplete = errors.New("input buffer too small to contain a frozen bitmap") + // ErrFrozenBitmapOverpopulated is returned when the number of containers is too large. + ErrFrozenBitmapOverpopulated = errors.New("too many containers") + // ErrFrozenBitmapUnexpectedData is returned when the buffer contains unexpected data. + ErrFrozenBitmapUnexpectedData = errors.New("spurious data in input") + // ErrFrozenBitmapInvalidTypecode is returned when the typecode is invalid. + ErrFrozenBitmapInvalidTypecode = errors.New("unrecognized typecode") + // ErrFrozenBitmapBufferTooSmall is returned when the buffer is too small. + ErrFrozenBitmapBufferTooSmall = errors.New("buffer too small") ) func (ra *roaringArray) frozenView(buf []byte) error { if len(buf) < 4 { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } headerBE := binary.BigEndian.Uint32(buf[len(buf)-4:]) - if headerBE&0x7fff == FROZEN_COOKIE { - return FrozenBitmapBigEndian + if headerBE&0x7fff == frozenCookie { + return ErrFrozenBitmapBigEndian } header := binary.LittleEndian.Uint32(buf[len(buf)-4:]) buf = buf[:len(buf)-4] - if header&0x7fff != FROZEN_COOKIE { - return FrozenBitmapInvalidCookie + if header&0x7fff != frozenCookie { + return ErrFrozenBitmapInvalidCookie } nCont := int(header >> 15) if nCont > (1 << 16) { - return FrozenBitmapOverpopulated + return ErrFrozenBitmapOverpopulated } // 1 byte per type, 2 bytes per key, 2 bytes per count. 
if len(buf) < 5*nCont { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } types := buf[len(buf)-nCont:] @@ -390,12 +396,12 @@ func (ra *roaringArray) frozenView(buf []byte) error { nRun++ nRunEl += int(counts[i]) default: - return FrozenBitmapInvalidTypecode + return ErrFrozenBitmapInvalidTypecode } } if len(buf) < (1<<13)*nBitmap+4*nRunEl+2*nArrayEl { - return FrozenBitmapIncomplete + return ErrFrozenBitmapIncomplete } bitsetsArena := byteSliceAsUint64Slice(buf[:(1<<13)*nBitmap]) @@ -408,15 +414,15 @@ func (ra *roaringArray) frozenView(buf []byte) error { buf = buf[2*nArrayEl:] if len(buf) != 0 { - return FrozenBitmapUnexpectedData + return ErrFrozenBitmapUnexpectedData } var c container - containersSz := int(unsafe.Sizeof(c))*nCont - bitsetsSz := int(unsafe.Sizeof(bitmapContainer{}))*nBitmap - arraysSz := int(unsafe.Sizeof(arrayContainer{}))*nArray - runsSz := int(unsafe.Sizeof(runContainer16{}))*nRun - needCOWSz := int(unsafe.Sizeof(true))*nCont + containersSz := int(unsafe.Sizeof(c)) * nCont + bitsetsSz := int(unsafe.Sizeof(bitmapContainer{})) * nBitmap + arraysSz := int(unsafe.Sizeof(arrayContainer{})) * nArray + runsSz := int(unsafe.Sizeof(runContainer16{})) * nRun + needCOWSz := int(unsafe.Sizeof(true)) * nCont bitmapArenaSz := containersSz + bitsetsSz + arraysSz + runsSz + needCOWSz bitmapArena := make([]byte, bitmapArenaSz) @@ -475,9 +481,10 @@ func (ra *roaringArray) frozenView(buf []byte) error { return nil } -func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { +// GetFrozenSizeInBytes returns the size in bytes of the frozen bitmap. +func (rb *Bitmap) GetFrozenSizeInBytes() uint64 { nBits, nArrayEl, nRunEl := uint64(0), uint64(0), uint64(0) - for _, c := range bm.highlowcontainer.containers { + for _, c := range rb.highlowcontainer.containers { switch v := c.(type) { case *bitmapContainer: nBits++ @@ -487,19 +494,21 @@ func (bm *Bitmap) GetFrozenSizeInBytes() uint64 { nRunEl += uint64(len(v.iv)) } } - return 4 + 5*uint64(len(bm.highlowcontainer.containers)) + + return 4 + 5*uint64(len(rb.highlowcontainer.containers)) + (nBits << 13) + 2*nArrayEl + 4*nRunEl } -func (bm *Bitmap) Freeze() ([]byte, error) { - sz := bm.GetFrozenSizeInBytes() +// Freeze serializes the bitmap in the CRoaring's frozen format. +func (rb *Bitmap) Freeze() ([]byte, error) { + sz := rb.GetFrozenSizeInBytes() buf := make([]byte, sz) - _, err := bm.FreezeTo(buf) + _, err := rb.FreezeTo(buf) return buf, err } -func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { - containers := bm.highlowcontainer.containers +// FreezeTo serializes the bitmap in the CRoaring's frozen format. 
+func (rb *Bitmap) FreezeTo(buf []byte) (int, error) { + containers := rb.highlowcontainer.containers nCont := len(containers) nBits, nArrayEl, nRunEl := 0, 0, 0 @@ -516,7 +525,7 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { serialSize := 4 + 5*nCont + (1<<13)*nBits + 4*nRunEl + 2*nArrayEl if len(buf) < serialSize { - return 0, FrozenBitmapBufferTooSmall + return 0, ErrFrozenBitmapBufferTooSmall } bitsArena := byteSliceAsUint64Slice(buf[:(1<<13)*nBits]) @@ -537,10 +546,10 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { types := buf[:nCont] buf = buf[nCont:] - header := uint32(FROZEN_COOKIE | (nCont << 15)) + header := uint32(frozenCookie | (nCont << 15)) binary.LittleEndian.PutUint32(buf[:4], header) - copy(keys, bm.highlowcontainer.keys[:]) + copy(keys, rb.highlowcontainer.keys[:]) for i, c := range containers { switch v := c.(type) { @@ -567,11 +576,12 @@ func (bm *Bitmap) FreezeTo(buf []byte) (int, error) { return serialSize, nil } -func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { +// WriteFrozenTo serializes the bitmap in the CRoaring's frozen format. +func (rb *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { // FIXME: this is a naive version that iterates 4 times through the // containers and allocates 3*len(containers) bytes; it's quite likely // it can be done more efficiently. - containers := bm.highlowcontainer.containers + containers := rb.highlowcontainer.containers written := 0 for _, c := range containers { @@ -610,7 +620,7 @@ func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { } } - n, err := wr.Write(uint16SliceAsByteSlice(bm.highlowcontainer.keys)) + n, err := wr.Write(uint16SliceAsByteSlice(rb.highlowcontainer.keys)) written += n if err != nil { return written, err @@ -642,7 +652,7 @@ func (bm *Bitmap) WriteFrozenTo(wr io.Writer) (int, error) { return written, err } - header := uint32(FROZEN_COOKIE | (len(containers) << 15)) + header := uint32(frozenCookie | (len(containers) << 15)) if err := binary.Write(wr, binary.LittleEndian, header); err != nil { return written, err } diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md index 4b82fd178f6..848234e2fca 100644 --- a/vendor/github.com/bits-and-blooms/bitset/README.md +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -108,6 +108,21 @@ You can later deserialize the result as follows: The `ReadFrom` function attempts to read the data into the existing BitSet instance, to minimize memory allocations. + +*Performance tip*: +When reading and writing to a file or a network connection, you may get better performance by +wrapping your streams with `bufio` instances. + +E.g., +```Go + f, err := os.Create("myfile") + w := bufio.NewWriter(f) +``` +```Go + f, err := os.Open("myfile") + r := bufio.NewReader(f) +``` + ## Memory Usage The memory usage of a bitset using `N` bits is at least `N/8` bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). 
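The bufio snippets added to the README above are fragments. Below is a minimal, self-contained sketch of the buffered round trip using the `WriteTo`/`ReadFrom` signatures shown in this diff; the file name `myfile` is simply the illustrative name from the README snippet, and error handling is kept to a bare minimum.

```Go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"github.com/bits-and-blooms/bitset"
)

func main() {
	b := bitset.New(1000)
	b.Set(10)
	b.Set(500)

	// Serialize through a buffered writer so WriteTo issues few large writes.
	f, err := os.Create("myfile")
	if err != nil {
		log.Fatal(err)
	}
	w := bufio.NewWriter(f)
	if _, err := b.WriteTo(w); err != nil {
		log.Fatal(err)
	}
	if err := w.Flush(); err != nil { // flush buffered bytes before closing the file
		log.Fatal(err)
	}
	f.Close()

	// Deserialize through a buffered reader into an existing BitSet.
	f, err = os.Open("myfile")
	if err != nil {
		log.Fatal(err)
	}
	r := bufio.NewReader(f)
	var c bitset.BitSet
	if _, err := c.ReadFrom(r); err != nil {
		log.Fatal(err)
	}
	f.Close()

	fmt.Println(c.Test(10), c.Test(500)) // true true
}
```

Without the `Flush` call the tail of the serialized data may never reach the file, which is the most common pitfall when wrapping the stream in a `bufio.Writer`.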
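Relatedly, for the frozen-format API renamed and documented earlier in serialization_littleendian.go (`Freeze`, `FrozenView`, `GetFrozenSizeInBytes`), here is a hedged usage sketch. `BitmapOf`, `New`, and `GetCardinality` are part of the public roaring API but do not appear in this hunk, so treat the exact round trip as illustrative rather than as upstream documentation.

```Go
package main

import (
	"fmt"
	"log"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 2, 1_000_000)

	// Freeze writes the bitmap in CRoaring's frozen format; per the code in
	// this diff, the returned buffer length equals GetFrozenSizeInBytes.
	buf, err := rb.Freeze()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(buf) == int(rb.GetFrozenSizeInBytes())) // true

	// FrozenView maps a bitmap onto the frozen buffer without copying it, so
	// buf must stay alive and unmodified for as long as the view is used.
	view := roaring.New()
	if err := view.FrozenView(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(view.GetCardinality()) // 3
}
```

Because the view borrows `buf`, bitmaps derived from it should be passed through `CloneCopyOnWriteContainers` (as the `FrozenView` doc comment above notes) before the buffer is released.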
diff --git a/vendor/github.com/bits-and-blooms/bitset/SECURITY.md b/vendor/github.com/bits-and-blooms/bitset/SECURITY.md new file mode 100644 index 00000000000..f888420c3ba --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/SECURITY.md @@ -0,0 +1,5 @@ +# Security Policy + +## Reporting a Vulnerability + +You can report privately a vulnerability by email at daniel@lemire.me (current maintainer). diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go index 237285b6042..8fb9e9fa208 100644 --- a/vendor/github.com/bits-and-blooms/bitset/bitset.go +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -247,8 +247,13 @@ func (b *BitSet) FlipRange(start, end uint) *BitSet { var startWord uint = start >> log2WordSize var endWord uint = end >> log2WordSize b.set[startWord] ^= ^(^uint64(0) << wordsIndex(start)) - for i := startWord; i < endWord; i++ { - b.set[i] = ^b.set[i] + if endWord > 0 { + // bounds check elimination + data := b.set + _ = data[endWord-1] + for i := startWord; i < endWord; i++ { + data[i] = ^data[i] + } } if end&(wordSize-1) != 0 { b.set[endWord] ^= ^uint64(0) >> wordsIndex(-end) @@ -427,12 +432,16 @@ func (b *BitSet) NextSet(i uint) (uint, bool) { if w != 0 { return i + trailingZeroes64(w), true } - x = x + 1 + x++ + // bounds check elimination in the loop + if x < 0 { + return 0, false + } for x < len(b.set) { if b.set[x] != 0 { return uint(x)*wordSize + trailingZeroes64(b.set[x]), true } - x = x + 1 + x++ } return 0, false @@ -516,10 +525,16 @@ func (b *BitSet) NextClear(i uint) (uint, bool) { return index, true } x++ + // bounds check elimination in the loop + if x < 0 { + return 0, false + } for x < len(b.set) { - index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) - if b.set[x] != allBits && index < b.length { - return index, true + if b.set[x] != allBits { + index = uint(x)*wordSize + trailingZeroes64(^b.set[x]) + if index < b.length { + return index, true + } } x++ } @@ -615,6 +630,12 @@ func (b *BitSet) Equal(c *BitSet) bool { return true } wn := b.wordCount() + // bounds check elimination + if wn <= 0 { + return true + } + _ = b.set[wn-1] + _ = c.set[wn-1] for p := 0; p < wn; p++ { if c.set[p] != b.set[p] { return false @@ -635,9 +656,9 @@ func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { panicIfNull(b) panicIfNull(compare) result = b.Clone() // clone b (in case b is bigger than compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() } for i := 0; i < l; i++ { result.set[i] = b.set[i] &^ compare.set[i] @@ -649,9 +670,9 @@ func (b *BitSet) Difference(compare *BitSet) (result *BitSet) { func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { panicIfNull(b) panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() } cnt := uint64(0) cnt += popcntMaskSlice(b.set[:l], compare.set[:l]) @@ -664,12 +685,19 @@ func (b *BitSet) DifferenceCardinality(compare *BitSet) uint { func (b *BitSet) InPlaceDifference(compare *BitSet) { panicIfNull(b) panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() } + if l <= 0 { + return + } + // bounds check elimination + data, cmpData := b.set, compare.set + _ = data[l-1] + _ = cmpData[l-1] for 
i := 0; i < l; i++ { - b.set[i] &^= compare.set[i] + data[i] &^= cmpData[i] } } @@ -712,15 +740,24 @@ func (b *BitSet) IntersectionCardinality(compare *BitSet) uint { func (b *BitSet) InPlaceIntersection(compare *BitSet) { panicIfNull(b) panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) - } - for i := 0; i < l; i++ { - b.set[i] &= compare.set[i] + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() + } + if l > 0 { + // bounds check elimination + data, cmpData := b.set, compare.set + _ = data[l-1] + _ = cmpData[l-1] + + for i := 0; i < l; i++ { + data[i] &= cmpData[i] + } } - for i := l; i < len(b.set); i++ { - b.set[i] = 0 + if l >= 0 { + for i := l; i < len(b.set); i++ { + b.set[i] = 0 + } } if compare.length > 0 { if compare.length-1 >= b.length { @@ -760,15 +797,22 @@ func (b *BitSet) UnionCardinality(compare *BitSet) uint { func (b *BitSet) InPlaceUnion(compare *BitSet) { panicIfNull(b) panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() } if compare.length > 0 && compare.length-1 >= b.length { b.extendSet(compare.length - 1) } - for i := 0; i < l; i++ { - b.set[i] |= compare.set[i] + if l > 0 { + // bounds check elimination + data, cmpData := b.set, compare.set + _ = data[l-1] + _ = cmpData[l-1] + + for i := 0; i < l; i++ { + data[i] |= cmpData[i] + } } if len(compare.set) > l { for i := l; i < len(compare.set); i++ { @@ -808,15 +852,21 @@ func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint { func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) { panicIfNull(b) panicIfNull(compare) - l := int(compare.wordCount()) - if l > int(b.wordCount()) { - l = int(b.wordCount()) + l := compare.wordCount() + if l > b.wordCount() { + l = b.wordCount() } if compare.length > 0 && compare.length-1 >= b.length { b.extendSet(compare.length - 1) } - for i := 0; i < l; i++ { - b.set[i] ^= compare.set[i] + if l > 0 { + // bounds check elimination + data, cmpData := b.set, compare.set + _ = data[l-1] + _ = cmpData[l-1] + for i := 0; i < l; i++ { + data[i] ^= cmpData[i] + } } if len(compare.set) > l { for i := l; i < len(compare.set); i++ { @@ -877,12 +927,16 @@ func (b *BitSet) Any() bool { // IsSuperSet returns true if this is a superset of the other set func (b *BitSet) IsSuperSet(other *BitSet) bool { - for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) { - if !b.Test(i) { + l := other.wordCount() + if b.wordCount() < l { + l = b.wordCount() + } + for i, word := range other.set[:l] { + if b.set[i]&word != word { return false } } - return true + return popcntSlice(other.set[l:]) == 0 } // IsStrictSuperSet returns true if this is a strict superset of the other set @@ -908,29 +962,74 @@ func (b *BitSet) BinaryStorageSize() int { return int(wordBytes + wordBytes*uint(b.wordCount())) } +func readUint64Array(reader io.Reader, data []uint64) error { + length := len(data) + bufferSize := 128 + buffer := make([]byte, bufferSize*int(wordBytes)) + for i := 0; i < length; i += bufferSize { + end := i + bufferSize + if end > length { + end = length + buffer = buffer[:wordBytes*uint(end-i)] + } + chunk := data[i:end] + if _, err := io.ReadFull(reader, buffer); err != nil { + return err + } + for i := range chunk { + chunk[i] = uint64(binaryOrder.Uint64(buffer[8*i:])) + } + } + return nil +} + +func writeUint64Array(writer io.Writer, data []uint64) error { + bufferSize := 128 + 
buffer := make([]byte, bufferSize*int(wordBytes)) + for i := 0; i < len(data); i += bufferSize { + end := i + bufferSize + if end > len(data) { + end = len(data) + buffer = buffer[:wordBytes*uint(end-i)] + } + chunk := data[i:end] + for i, x := range chunk { + binaryOrder.PutUint64(buffer[8*i:], x) + } + _, err := writer.Write(buffer) + if err != nil { + return err + } + } + return nil +} + // WriteTo writes a BitSet to a stream. The format is: // 1. uint64 length // 2. []uint64 set // Upon success, the number of bytes written is returned. +// +// Performance: if this function is used to write to a disk or network +// connection, it might be beneficial to wrap the stream in a bufio.Writer. +// E.g., +// +// f, err := os.Create("myfile") +// w := bufio.NewWriter(f) func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { - buf := make([]byte, wordBytes) length := uint64(b.length) - // Write length - binaryOrder.PutUint64(buf, length) - n, err := stream.Write(buf) + err := binary.Write(stream, binaryOrder, &length) if err != nil { - return int64(n), err + // Upon failure, we do not guarantee that we + // return the number of bytes written. + return int64(0), err } - - nWords := b.wordCount() - for i := range b.set[:nWords] { - binaryOrder.PutUint64(buf, b.set[i]) - if nn, err := stream.Write(buf); err != nil { - return int64(i*int(wordBytes) + nn + n), err - } + err = writeUint64Array(stream, b.set[:b.wordCount()]) + if err != nil { + // Upon failure, we do not guarantee that we + // return the number of bytes written. + return int64(wordBytes), err } - return int64(b.BinaryStorageSize()), nil } @@ -943,12 +1042,16 @@ func (b *BitSet) WriteTo(stream io.Writer) (int64, error) { // it is extended. In case of error, the BitSet is either // left unchanged or made empty if the error occurs too late // to preserve the content. +// +// Performance: if this function is used to read from a disk or network +// connection, it might be beneficial to wrap the stream in a bufio.Reader. +// E.g., +// +// f, err := os.Open("myfile") +// r := bufio.NewReader(f) func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { - buf := make([]byte, wordBytes) - - // Read length first - _, err := io.ReadFull(stream, buf[:]) - length := binaryOrder.Uint64(buf) + var length uint64 + err := binary.Read(stream, binaryOrder, &length) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -969,18 +1072,16 @@ func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) { b.length = newlength - for i := 0; i < nWords; i++ { - if _, err := io.ReadFull(stream, buf); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - // We do not want to leave the BitSet partially filled as - // it is error prone. - b.set = b.set[:0] - b.length = 0 - return 0, err + err = readUint64Array(stream, b.set) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF } - b.set[i] = binaryOrder.Uint64(buf) + // We do not want to leave the BitSet partially filled as + // it is error prone. 
+ b.set = b.set[:0] + b.length = 0 + return 0, err } return int64(b.BinaryStorageSize()), nil @@ -1005,7 +1106,7 @@ func (b *BitSet) UnmarshalBinary(data []byte) error { } // MarshalJSON marshals a BitSet as a JSON structure -func (b *BitSet) MarshalJSON() ([]byte, error) { +func (b BitSet) MarshalJSON() ([]byte, error) { buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) _, err := b.WriteTo(buffer) if err != nil { diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go index 9a3766a01c0..7855c04b5b2 100644 --- a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go @@ -15,6 +15,10 @@ func popcntSlice(s []uint64) uint64 { func popcntMaskSlice(s, m []uint64) uint64 { var cnt int + // this explicit check eliminates a bounds check in the loop + if len(m) < len(s) { + panic("mask slice is too short") + } for i := range s { cnt += bits.OnesCount64(s[i] &^ m[i]) } @@ -23,6 +27,10 @@ func popcntMaskSlice(s, m []uint64) uint64 { func popcntAndSlice(s, m []uint64) uint64 { var cnt int + // this explicit check eliminates a bounds check in the loop + if len(m) < len(s) { + panic("mask slice is too short") + } for i := range s { cnt += bits.OnesCount64(s[i] & m[i]) } @@ -31,6 +39,10 @@ func popcntAndSlice(s, m []uint64) uint64 { func popcntOrSlice(s, m []uint64) uint64 { var cnt int + // this explicit check eliminates a bounds check in the loop + if len(m) < len(s) { + panic("mask slice is too short") + } for i := range s { cnt += bits.OnesCount64(s[i] | m[i]) } @@ -39,6 +51,10 @@ func popcntOrSlice(s, m []uint64) uint64 { func popcntXorSlice(s, m []uint64) uint64 { var cnt int + // this explicit check eliminates a bounds check in the loop + if len(m) < len(s) { + panic("mask slice is too short") + } for i := range s { cnt += bits.OnesCount64(s[i] ^ m[i]) } diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go index dd0ed101563..8c00e737a03 100644 --- a/vendor/github.com/golang/glog/glog.go +++ b/vendor/github.com/golang/glog/glog.go @@ -15,8 +15,26 @@ // limitations under the License. // Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// It provides functions that have a name matched by regex: +// +// (Info|Warning|Error|Fatal)(Context)?(Depth)?(f)? +// +// If Context is present, function takes context.Context argument. The +// context is used to pass through the Trace Context to log sinks that can make use +// of it. +// It is recommended to use the context variant of the functions over the non-context +// variants if a context is available to make sure the Trace Contexts are present +// in logs. +// +// If Depth is present, this function calls log from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +// +// If 'f' is present, function formats according to a format specifier. +// +// This package also provides V-style logging controlled by the -v and -vmodule=file=2 flags. 
// // Basic examples: // @@ -82,6 +100,7 @@ package glog import ( "bytes" + "context" "errors" "fmt" stdLog "log" @@ -182,9 +201,14 @@ func appendBacktrace(depth int, format string, args []any) (string, []any) { return format, args } -// logf writes a log message for a log function call (or log function wrapper) -// at the given depth in the current goroutine's stack. +// logf acts as ctxlogf, but doesn't expect a context. func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + ctxlogf(nil, depth+1, severity, verbose, stack, format, args...) +} + +// ctxlogf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func ctxlogf(ctx context.Context, depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { now := timeNow() _, file, line, ok := runtime.Caller(depth + 1) if !ok { @@ -198,6 +222,7 @@ func logf(depth int, severity logsink.Severity, verbose bool, stack stack, forma metai, meta := metaPoolGet() *meta = logsink.Meta{ + Context: ctx, Time: now, File: file, Line: line, @@ -207,6 +232,9 @@ func logf(depth int, severity logsink.Severity, verbose bool, stack stack, forma Thread: int64(pid), } sinkf(meta, format, args...) + // Clear pointer fields so they can be garbage collected early. + meta.Context = nil + meta.Stack = nil metaPool.Put(metai) } @@ -418,6 +446,36 @@ func (v Verbose) Infof(format string, args ...any) { } } +// InfoContext is equivalent to the global InfoContext function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContext(ctx context.Context, args ...any) { + v.InfoContextDepth(ctx, 1, args...) +} + +// InfoContextf is equivalent to the global InfoContextf function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContextf(ctx context.Context, format string, args ...any) { + if v { + ctxlogf(ctx, 1, logsink.Info, true, noStack, format, args...) + } +} + +// InfoContextDepth is equivalent to the global InfoContextDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContextDepth(ctx context.Context, depth int, args ...any) { + if v { + ctxlogf(ctx, depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) + } +} + +// InfoContextDepthf is equivalent to the global InfoContextDepthf function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { + if v { + ctxlogf(ctx, depth+1, logsink.Info, true, noStack, format, args...) + } +} + // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...any) { @@ -450,6 +508,30 @@ func Infof(format string, args ...any) { logf(1, logsink.Info, false, noStack, format, args...) } +// InfoContext is like [Info], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContext(ctx context.Context, args ...any) { + InfoContextDepth(ctx, 1, args...) +} + +// InfoContextf is like [Infof], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Info, false, noStack, format, args...) 
+} + +// InfoContextDepth is like [InfoDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) +} + +// InfoContextDepthf is like [InfoDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Info, false, noStack, format, args...) +} + // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...any) { @@ -480,6 +562,30 @@ func Warningf(format string, args ...any) { logf(1, logsink.Warning, false, noStack, format, args...) } +// WarningContext is like [Warning], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContext(ctx context.Context, args ...any) { + WarningContextDepth(ctx, 1, args...) +} + +// WarningContextf is like [Warningf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Warning, false, noStack, format, args...) +} + +// WarningContextDepth is like [WarningDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningContextDepthf is like [WarningDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func WarningContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, format, args...) +} + // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...any) { @@ -510,8 +616,32 @@ func Errorf(format string, args ...any) { logf(1, logsink.Error, false, noStack, format, args...) } -func fatalf(depth int, format string, args ...any) { - logf(depth+1, logsink.Fatal, false, withStack, format, args...) +// ErrorContext is like [Error], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContext(ctx context.Context, args ...any) { + ErrorContextDepth(ctx, 1, args...) +} + +// ErrorContextf is like [Errorf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContextf(ctx context.Context, format string, args ...any) { + ctxlogf(ctx, 1, logsink.Error, false, noStack, format, args...) +} + +// ErrorContextDepth is like [ErrorDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ErrorContextDepth(ctx context.Context, depth int, args ...any) { + ctxlogf(ctx, depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorContextDepthf is like [ErrorDepthf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. 
+func ErrorContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Error, false, noStack, format, args...) +} + +func ctxfatalf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Fatal, false, withStack, format, args...) sinks.file.Flush() err := abortProcess() // Should not return. @@ -523,6 +653,10 @@ func fatalf(depth int, format string, args ...any) { os.Exit(2) // Exit with the same code as the default SIGABRT handler. } +func fatalf(depth int, format string, args ...any) { + ctxfatalf(nil, depth+1, format, args...) +} + // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. @@ -556,12 +690,39 @@ func Fatalf(format string, args ...any) { fatalf(1, format, args...) } -func exitf(depth int, format string, args ...any) { - logf(depth+1, logsink.Fatal, false, noStack, format, args...) +// FatalContext is like [Fatal], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func FatalContext(ctx context.Context, args ...any) { + FatalContextDepth(ctx, 1, args...) +} + +// FatalContextf is like [Fatalf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func FatalContextf(ctx context.Context, format string, args ...any) { + ctxfatalf(ctx, 1, format, args...) +} + +// FatalContextDepth is like [FatalDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func FatalContextDepth(ctx context.Context, depth int, args ...any) { + ctxfatalf(ctx, depth+1, defaultFormat(args), args...) +} + +// FatalContextDepthf is like [FatalDepthf], but with an extra [context.Context] parameter. +func FatalContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxfatalf(ctx, depth+1, format, args...) +} + +func ctxexitf(ctx context.Context, depth int, format string, args ...any) { + ctxlogf(ctx, depth+1, logsink.Fatal, false, noStack, format, args...) sinks.file.Flush() os.Exit(1) } +func exitf(depth int, format string, args ...any) { + ctxexitf(nil, depth+1, format, args...) +} + // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...any) { @@ -590,3 +751,27 @@ func Exitln(args ...any) { func Exitf(format string, args ...any) { exitf(1, format, args...) } + +// ExitContext is like [Exit], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ExitContext(ctx context.Context, args ...any) { + ExitContextDepth(ctx, 1, args...) +} + +// ExitContextf is like [Exitf], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ExitContextf(ctx context.Context, format string, args ...any) { + ctxexitf(ctx, 1, format, args...) +} + +// ExitContextDepth is like [ExitDepth], but with an extra [context.Context] parameter. The +// context is used to pass the Trace Context to log sinks. +func ExitContextDepth(ctx context.Context, depth int, args ...any) { + ctxexitf(ctx, depth+1, defaultFormat(args), args...) +} + +// ExitContextDepthf is like [ExitDepthf], but with an extra [context.Context] parameter. 
The +// context is used to pass the Trace Context to log sinks. +func ExitContextDepthf(ctx context.Context, depth int, format string, args ...any) { + ctxexitf(ctx, depth+1, format, args...) +} diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink.go b/vendor/github.com/golang/glog/internal/logsink/logsink.go index 53758e1c9f5..28c38a6abc6 100644 --- a/vendor/github.com/golang/glog/internal/logsink/logsink.go +++ b/vendor/github.com/golang/glog/internal/logsink/logsink.go @@ -16,6 +16,7 @@ package logsink import ( "bytes" + "context" "fmt" "strconv" "strings" @@ -77,6 +78,11 @@ func ParseSeverity(name string) (Severity, error) { // Meta is metadata about a logging call. type Meta struct { + // The context with which the log call was made (or nil). If set, the context + // is only valid during the logsink.Structured.Printf call, it should not be + // retained. + Context context.Context + // Time is the time at which the log call was made. Time time.Time diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go deleted file mode 100644 index 6c16c255ffb..00000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONUnmarshalV2 = false - -// UnmarshalNext unmarshals the next JSON object from d into m. -func UnmarshalNext(d *json.Decoder, m proto.Message) error { - return new(Unmarshaler).UnmarshalNext(d, m) -} - -// Unmarshal unmarshals a JSON object from r into m. -func Unmarshal(r io.Reader, m proto.Message) error { - return new(Unmarshaler).Unmarshal(r, m) -} - -// UnmarshalString unmarshals a JSON object from s into m. -func UnmarshalString(s string, m proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. -type Unmarshaler struct { - // AllowUnknownFields specifies whether to allow messages to contain - // unknown JSON fields, as opposed to failing to unmarshal. - AllowUnknownFields bool - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize the way -// they are unmarshaled from JSON. Messages that implement this should also -// implement JSONPBMarshaler so that the custom format can be produced. -// -// The JSON unmarshaling must follow the JSON to proto specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Unmarshal unmarshals a JSON object from r into m. 
-func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { - return u.UnmarshalNext(json.NewDecoder(r), m) -} - -// UnmarshalNext unmarshals the next JSON object from d into m. -func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { - if m == nil { - return errors.New("invalid nil message") - } - - // Parse the next JSON object from the stream. - raw := json.RawMessage{} - if err := d.Decode(&raw); err != nil { - return err - } - - // Check for custom unmarshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsu, ok := m.(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, raw) - } - - mr := proto.MessageReflect(m) - - // NOTE: For historical reasons, a top-level null is treated as a noop. - // This is incorrect, but kept for compatibility. - if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { - return nil - } - - if wrapJSONUnmarshalV2 { - // NOTE: If input message is non-empty, we need to preserve merge semantics - // of the old jsonpb implementation. These semantics are not supported by - // the protobuf JSON specification. - isEmpty := true - mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { - isEmpty = false // at least one iteration implies non-empty - return false - }) - if !isEmpty { - // Perform unmarshaling into a newly allocated, empty message. - mr = mr.New() - - // Use a defer to copy all unmarshaled fields into the original message. - dst := proto.MessageReflect(m) - defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - dst.Set(fd, v) - return true - }) - } - - // Unmarshal using the v2 JSON unmarshaler. - opts := protojson.UnmarshalOptions{ - DiscardUnknown: u.AllowUnknownFields, - } - if u.AnyResolver != nil { - opts.Resolver = anyResolver{u.AnyResolver} - } - return opts.Unmarshal(raw, mr.Interface()) - } else { - if err := u.unmarshalMessage(mr, raw); err != nil { - return err - } - return protoV2.CheckInitialized(mr.Interface()) - } -} - -func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { - md := m.Descriptor() - fds := md.Fields() - - if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, in) - } - - if string(in) == "null" && md.FullName() != "google.protobuf.Value" { - return nil - } - - switch wellKnownType(md.FullName()) { - case "Any": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - rawTypeURL, ok := jsonObject["@type"] - if !ok { - return errors.New("Any JSON doesn't have '@type'") - } - typeURL, err := unquoteString(string(rawTypeURL)) - if err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) - } - m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) - - var m2 protoreflect.Message - if u.AnyResolver != nil { - mi, err := u.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - if err == protoregistry.NotFound { - return fmt.Errorf("could not resolve Any message type: %v", typeURL) - } - return err - } - m2 = mt.New() - } - - if wellKnownType(m2.Descriptor().FullName()) != "" { - rawValue, ok := jsonObject["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - if err := u.unmarshalMessage(m2, rawValue); err != nil { - return 
fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } else { - delete(jsonObject, "@type") - rawJSON, err := json.Marshal(jsonObject) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - if err = u.unmarshalMessage(m2, rawJSON); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) - } - } - - rawWire, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) - return nil - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - v, err := u.unmarshalValue(m.NewField(fd), in, fd) - if err != nil { - return err - } - m.Set(fd, v) - return nil - case "Duration": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - d, err := time.ParseDuration(v) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - sec := d.Nanoseconds() / 1e9 - nsec := d.Nanoseconds() % 1e9 - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Timestamp": - v, err := unquoteString(string(in)) - if err != nil { - return err - } - t, err := time.Parse(time.RFC3339Nano, v) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - sec := t.Unix() - nsec := t.Nanosecond() - m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) - m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) - return nil - case "Value": - switch { - case string(in) == "null": - m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) - case string(in) == "true": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) - case string(in) == "false": - m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) - case hasPrefixAndSuffix('"', in, '"'): - s, err := unquoteString(string(in)) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) - case hasPrefixAndSuffix('[', in, ']'): - v := m.Mutable(fds.ByNumber(6)) - return u.unmarshalMessage(v.Message(), in) - case hasPrefixAndSuffix('{', in, '}'): - v := m.Mutable(fds.ByNumber(5)) - return u.unmarshalMessage(v.Message(), in) - default: - f, err := strconv.ParseFloat(string(in), 0) - if err != nil { - return fmt.Errorf("unrecognized type for Value %q", in) - } - m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) - } - return nil - case "ListValue": - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - lv := m.Mutable(fds.ByNumber(1)).List() - for _, raw := range jsonArray { - ve := lv.NewElement() - if err := u.unmarshalMessage(ve.Message(), raw); err != nil { - return err - } - lv.Append(ve) - } - return nil - case "Struct": - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - mv := m.Mutable(fds.ByNumber(1)).Map() - for key, raw := range jsonObject { - kv := protoreflect.ValueOf(key).MapKey() - vv := mv.NewValue() - if err := u.unmarshalMessage(vv.Message(), raw); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) - } - mv.Set(kv, vv) - } - return nil - } - - var jsonObject 
map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return err - } - - // Handle known fields. - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if fd.IsWeak() && fd.Message().IsPlaceholder() { - continue // weak reference is not linked in - } - - // Search for any raw JSON value associated with this field. - var raw json.RawMessage - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - name = string(fd.JSONName()) - if v, ok := jsonObject[name]; ok { - delete(jsonObject, name) - raw = v - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - // Handle extension fields. - for name, raw := range jsonObject { - if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { - continue - } - - // Resolve the extension field by name. - xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(md) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - continue - } - delete(jsonObject, name) - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) - } - - field := m.NewField(fd) - // Unmarshal the field value. - if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { - continue - } - v, err := u.unmarshalValue(field, raw, fd) - if err != nil { - return err - } - m.Set(fd, v) - } - - if !u.AllowUnknownFields && len(jsonObject) > 0 { - for name := range jsonObject { - return fmt.Errorf("unknown field %q in %v", name, md.FullName()) - } - } - return nil -} - -func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { - if fd.Cardinality() == protoreflect.Repeated { - return false - } - if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" - } - if ed := fd.Enum(); ed != nil { - return ed.FullName() == "google.protobuf.NullValue" - } - return false -} - -func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { - if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { - _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) - return ok - } - return false -} - -func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch { - case fd.IsList(): - var jsonArray []json.RawMessage - if err := json.Unmarshal(in, &jsonArray); err != nil { - return v, err - } - lv := v.List() - for _, raw := range jsonArray { - ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) - if err != nil { - return v, err - } - lv.Append(ve) - } - return v, nil - case fd.IsMap(): - var jsonObject map[string]json.RawMessage - if err := json.Unmarshal(in, &jsonObject); err != nil { - return v, err - } - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - for key, raw := range jsonObject { - var kv protoreflect.MapKey - if kfd.Kind() == 
protoreflect.StringKind { - kv = protoreflect.ValueOf(key).MapKey() - } else { - v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) - if err != nil { - return v, err - } - kv = v.MapKey() - } - - vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) - if err != nil { - return v, err - } - mv.Set(kv, vv) - } - return v, nil - default: - return u.unmarshalSingularValue(v, in, fd) - } -} - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(+1), - `"-Infinity"`: math.Inf(-1), -} - -func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - return unmarshalValue(in, new(bool)) - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - return unmarshalValue(trimQuote(in), new(int32)) - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return unmarshalValue(trimQuote(in), new(int64)) - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - return unmarshalValue(trimQuote(in), new(uint32)) - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return unmarshalValue(trimQuote(in), new(uint64)) - case protoreflect.FloatKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat32(float32(f)), nil - } - return unmarshalValue(trimQuote(in), new(float32)) - case protoreflect.DoubleKind: - if f, ok := nonFinite[string(in)]; ok { - return protoreflect.ValueOfFloat64(float64(f)), nil - } - return unmarshalValue(trimQuote(in), new(float64)) - case protoreflect.StringKind: - return unmarshalValue(in, new(string)) - case protoreflect.BytesKind: - return unmarshalValue(in, new([]byte)) - case protoreflect.EnumKind: - if hasPrefixAndSuffix('"', in, '"') { - vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) - if vd == nil { - return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) - } - return protoreflect.ValueOfEnum(vd.Number()), nil - } - return unmarshalValue(in, new(protoreflect.EnumNumber)) - case protoreflect.MessageKind, protoreflect.GroupKind: - err := u.unmarshalMessage(v.Message(), in) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } -} - -func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { - err := json.Unmarshal(in, v) - return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err -} - -func unquoteString(in string) (out string, err error) { - err = json.Unmarshal([]byte(in), &out) - return out, err -} - -func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { - if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { - return true - } - return false -} - -// trimQuote is like unquoteString but simply strips surrounding quotes. -// This is incorrect, but is behavior done by the legacy implementation. -func trimQuote(in []byte) []byte { - if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { - in = in[1 : len(in)-1] - } - return in -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go deleted file mode 100644 index 685c80a62bc..00000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/encode.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package jsonpb - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/encoding/protojson" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapJSONMarshalV2 = false - -// Marshaler is a configurable object for marshaling protocol buffer messages -// to the specified JSON representation. -type Marshaler struct { - // OrigName specifies whether to use the original protobuf name for fields. - OrigName bool - - // EnumsAsInts specifies whether to render enum values as integers, - // as opposed to string values. - EnumsAsInts bool - - // EmitDefaults specifies whether to render fields with zero values. - EmitDefaults bool - - // Indent controls whether the output is compact or not. - // If empty, the output is compact JSON. Otherwise, every JSON object - // entry and JSON array value will be on its own line. - // Each line will be preceded by repeated copies of Indent, where the - // number of copies is the current indentation depth. - Indent string - - // AnyResolver is used to resolve the google.protobuf.Any well-known type. - // If unset, the global registry is used by default. - AnyResolver AnyResolver -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should also -// implement JSONPBUnmarshaler so that the custom format can be parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -// -// Deprecated: Custom types should implement protobuf reflection instead. -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// Marshal serializes a protobuf message as JSON into w. -func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { - b, err := jm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// MarshalToString serializes a protobuf message as JSON in string form. -func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { - b, err := jm.marshal(m) - if err != nil { - return "", err - } - return string(b), nil -} - -func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { - v := reflect.ValueOf(m) - if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, errors.New("Marshal called with nil") - } - - // Check for custom marshalers first since they may not properly - // implement protobuf reflection that the logic below relies on. - if jsm, ok := m.(JSONPBMarshaler); ok { - return jsm.MarshalJSONPB(jm) - } - - if wrapJSONMarshalV2 { - opts := protojson.MarshalOptions{ - UseProtoNames: jm.OrigName, - UseEnumNumbers: jm.EnumsAsInts, - EmitUnpopulated: jm.EmitDefaults, - Indent: jm.Indent, - } - if jm.AnyResolver != nil { - opts.Resolver = anyResolver{jm.AnyResolver} - } - return opts.Marshal(proto.MessageReflect(m).Interface()) - } else { - // Check for unpopulated required fields first. - m2 := proto.MessageReflect(m) - if err := protoV2.CheckInitialized(m2.Interface()); err != nil { - return nil, err - } - - w := jsonWriter{Marshaler: jm} - err := w.marshalMessage(m2, "", "") - return w.buf, err - } -} - -type jsonWriter struct { - *Marshaler - buf []byte -} - -func (w *jsonWriter) write(s string) { - w.buf = append(w.buf, s...) 
-} - -func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { - if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(w.Marshaler) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - w.write(string(b)) - return nil - } - - md := m.Descriptor() - fds := md.Fields() - - // Handle well-known types. - const secondInNanos = int64(time.Second / time.Nanosecond) - switch wellKnownType(md.FullName()) { - case "Any": - return w.marshalAny(m, indent) - case "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue": - fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - case "Duration": - const maxSecondsInDuration = 315576000000 - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if s < -maxSecondsInDuration || s > maxSecondsInDuration { - return fmt.Errorf("seconds out of range %v", s) - } - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - var sign string - if s < 0 || ns < 0 { - sign, s, ns = "-", -1*s, -1*ns - } - x := fmt.Sprintf("%s%d.%09d", sign, s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vs"`, x)) - return nil - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s := m.Get(fds.ByNumber(1)).Int() - ns := m.Get(fds.ByNumber(2)).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - w.write(fmt.Sprintf(`"%vZ"`, x)) - return nil - case "Value": - // JSON value; which is a null, number, string, bool, object, or array. - od := md.Oneofs().Get(0) - fd := m.WhichOneof(od) - if fd == nil { - return errors.New("nil Value") - } - return w.marshalValue(fd, m.Get(fd), indent) - case "Struct", "ListValue": - // JSON object or array. 
- fd := fds.ByNumber(1) - return w.marshalValue(fd, m.Get(fd), indent) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - - firstField := true - if typeURL != "" { - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - if fd == nil { - continue - } - } else { - i++ - } - - v := m.Get(fd) - - if !m.Has(fd) { - if !w.EmitDefaults || fd.ContainingOneof() != nil { - continue - } - if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { - v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars - } - } - - if !firstField { - w.writeComma() - } - if err := w.marshalField(fd, v, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if md.ExtensionRanges().Len() > 0 { - // Collect a sorted list of all extension descriptor and values. - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - if !firstField { - w.writeComma() - } - if err := w.marshalField(ext.desc, ext.val, indent); err != nil { - return err - } - firstField = false - } - } - - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) writeComma() { - if w.Indent != "" { - w.write(",\n") - } else { - w.write(",") - } -} - -func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." 
- md := m.Descriptor() - typeURL := m.Get(md.Fields().ByNumber(1)).String() - rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() - - var m2 protoreflect.Message - if w.AnyResolver != nil { - mi, err := w.AnyResolver.Resolve(typeURL) - if err != nil { - return err - } - m2 = proto.MessageReflect(mi) - } else { - mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) - if err != nil { - return err - } - m2 = mt.New() - } - - if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { - return err - } - - if wellKnownType(m2.Descriptor().FullName()) == "" { - return w.marshalMessage(m2, indent, typeURL) - } - - w.write("{") - if w.Indent != "" { - w.write("\n") - } - if err := w.marshalTypeURL(indent, typeURL); err != nil { - return err - } - w.writeComma() - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - w.write(`"value": `) - } else { - w.write(`"value":`) - } - if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { - return err - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - } - w.write("}") - return nil -} - -func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"@type":`) - if w.Indent != "" { - w.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - w.write(string(b)) - return nil -} - -// marshalField writes field description and value to the Writer. -func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - if w.Indent != "" { - w.write(indent) - w.write(w.Indent) - } - w.write(`"`) - switch { - case fd.IsExtension(): - // For message set, use the fname of the message as the extension name. - name := string(fd.FullName()) - if isMessageSet(fd.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - w.write("[" + name + "]") - case w.OrigName: - name := string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { - name = string(fd.Message().Name()) - } - w.write(name) - default: - w.write(string(fd.JSONName())) - } - w.write(`":`) - if w.Indent != "" { - w.write(" ") - } - return w.marshalValue(fd, v, indent) -} - -func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case fd.IsList(): - w.write("[") - comma := "" - lv := v.List() - for i := 0; i < lv.Len(); i++ { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write("]") - return nil - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := v.Map() - - // Collect a sorted list of all map keys and values. 
- type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - - w.write(`{`) - comma := "" - for _, entry := range entries { - w.write(comma) - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - w.write(w.Indent) - } - - s := fmt.Sprint(entry.key.Interface()) - b, err := json.Marshal(s) - if err != nil { - return err - } - w.write(string(b)) - - w.write(`:`) - if w.Indent != "" { - w.write(` `) - } - - if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { - return err - } - comma = "," - } - if w.Indent != "" { - w.write("\n") - w.write(indent) - w.write(w.Indent) - } - w.write(`}`) - return nil - default: - return w.marshalSingularValue(fd, v, indent) - } -} - -func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { - switch { - case !v.IsValid(): - w.write("null") - return nil - case fd.Message() != nil: - return w.marshalMessage(v.Message(), indent+w.Indent, "") - case fd.Enum() != nil: - if fd.Enum().FullName() == "google.protobuf.NullValue" { - w.write("null") - return nil - } - - vd := fd.Enum().Values().ByNumber(v.Enum()) - if vd == nil || w.EnumsAsInts { - w.write(strconv.Itoa(int(v.Enum()))) - } else { - w.write(`"` + string(vd.Name()) + `"`) - } - return nil - default: - switch v.Interface().(type) { - case float32, float64: - switch { - case math.IsInf(v.Float(), +1): - w.write(`"Infinity"`) - return nil - case math.IsInf(v.Float(), -1): - w.write(`"-Infinity"`) - return nil - case math.IsNaN(v.Float()): - w.write(`"NaN"`) - return nil - } - case int64, uint64: - w.write(fmt.Sprintf(`"%d"`, v.Interface())) - return nil - } - - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - w.write(string(b)) - return nil - } -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go deleted file mode 100644 index 480e2448de6..00000000000 --- a/vendor/github.com/golang/protobuf/jsonpb/json.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jsonpb provides functionality to marshal and unmarshal between a -// protocol buffer message and JSON. It follows the specification at -// https://developers.google.com/protocol-buffers/docs/proto3#json. -// -// Do not rely on the default behavior of the standard encoding/json package -// when called on generated message types as it does not operate correctly. -// -// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" -// package instead. 
-package jsonpb - -import ( - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// AnyResolver takes a type URL, present in an Any message, -// and resolves it into an instance of the associated message. -type AnyResolver interface { - Resolve(typeURL string) (proto.Message, error) -} - -type anyResolver struct{ AnyResolver } - -func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - return r.FindMessageByURL(string(message)) -} - -func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { - m, err := r.Resolve(url) - if err != nil { - return nil, err - } - return protoimpl.X.MessageTypeOf(m), nil -} - -func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -func wellKnownType(s protoreflect.FullName) string { - if s.Parent() == "google.protobuf" { - switch s.Name() { - case "Empty", "Any", - "BoolValue", "BytesValue", "StringValue", - "Int32Value", "UInt32Value", "FloatValue", - "Int64Value", "UInt64Value", "DoubleValue", - "Duration", "Timestamp", - "NullValue", "Struct", "Value", "ListValue": - return string(s.Name()) - } - } - return "" -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 85f9f57365f..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. 
-func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. 
-type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33deb..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. - -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d28..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd851f..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. 
-func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3ef3..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. - -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: 
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go deleted file mode 100644 index 16686a65523..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/empty/empty.proto - -package empty - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/empty.proto. - -type Empty = emptypb.Empty - -var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } -func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { - if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File - file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil - 
file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70d3..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). 
-// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. -func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f8076009..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc64..7ec5ac7ea90 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec27..dc60082d3b3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. 
+ Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb689..3167b643d45 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 @@ -52,7 +52,7 @@ func makeV7(uuid []byte) { +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. 
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig new file mode 100644 index 00000000000..c6b74c3e0d0 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore new file mode 100644 index 00000000000..577a89e8138 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.gitignore @@ -0,0 +1,2 @@ +# Output of the go test coverage tool +coverage.coverprofile diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE index 66ea3c8ae71..bb9d80bc9b6 100644 --- a/vendor/github.com/gorilla/handlers/LICENSE +++ b/vendor/github.com/gorilla/handlers/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile new file mode 100644 index 00000000000..003b784f7ed --- /dev/null +++ b/vendor/github.com/gorilla/handlers/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md index 6eba66bf302..02555b2642c 100644 --- a/vendor/github.com/gorilla/handlers/README.md +++ b/vendor/github.com/gorilla/handlers/README.md @@ -1,10 +1,10 @@ -gorilla/handlers -================ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) [![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) -[![CircleCI](https://circleci.com/gh/gorilla/handlers.svg?style=svg)](https://circleci.com/gh/gorilla/handlers) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) - Package handlers is a collection of handlers (aka "HTTP middleware") for use with Go's `net/http` package (or any framework supporting `http.Handler`), including: diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go index 8437fefc1ef..7121f5307be 100644 --- a/vendor/github.com/gorilla/handlers/canonical.go +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -21,12 +21,11 @@ type canonical struct { // // Example: // -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) // +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { fn := func(h http.Handler) http.Handler { return canonical{h, domain, code} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go index 1e95f1ccbfa..d6f589503b5 100644 --- a/vendor/github.com/gorilla/handlers/compress.go +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -44,13 +44,13 @@ type flusher interface { Flush() error } -func (w *compressResponseWriter) Flush() { +func (cw *compressResponseWriter) Flush() { // Flush compressed data if compressor supports it. - if f, ok := w.compressor.(flusher); ok { - f.Flush() + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() } // Flush HTTP response. - if f, ok := w.w.(http.Flusher); ok { + if f, ok := cw.w.(http.Flusher); ok { f.Flush() } } diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go index 0dcdffb3d32..8af9c096e5e 100644 --- a/vendor/github.com/gorilla/handlers/cors.go +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -26,14 +26,14 @@ type cors struct { type OriginValidator func(string) bool var ( - defaultCorsOptionStatusCode = 200 - defaultCorsMethods = []string{"GET", "HEAD", "POST"} + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} - // (WebKit/Safari v9 sends the Origin header by default in AJAX requests) + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
) const ( - corsOptionMethod string = "OPTIONS" + corsOptionMethod string = http.MethodOptions corsAllowOriginHeader string = "Access-Control-Allow-Origin" corsExposeHeadersHeader string = "Access-Control-Expose-Headers" corsMaxAgeHeader string = "Access-Control-Max-Age" @@ -101,10 +101,8 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ch.isMatch(method, defaultCorsMethods) { w.Header().Set(corsAllowMethodsHeader, method) } - } else { - if len(ch.exposedHeaders) > 0 { - w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) - } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) } if ch.allowCredentials { @@ -141,22 +139,21 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { // CORS provides Cross-Origin Resource Sharing middleware. // Example: // -// import ( -// "net/http" -// -// "github.com/gorilla/handlers" -// "github.com/gorilla/mux" -// ) +// import ( +// "net/http" // -// func main() { -// r := mux.NewRouter() -// r.HandleFunc("/users", UserEndpoint) -// r.HandleFunc("/projects", ProjectEndpoint) +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) // -// // Apply the CORS middleware to our top-level router, with the defaults. -// http.ListenAndServe(":8000", handlers.CORS()(r)) -// } +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) // +// // Apply the CORS middleware to our top-level router, with the defaults. +// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } func CORS(opts ...CORSOption) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { ch := parseCORSOptions(opts...) @@ -174,7 +171,7 @@ func parseCORSOptions(opts ...CORSOption) *cors { } for _, option := range opts { - option(ch) + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? } return ch diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go index 0509482ad7a..9b92fce3333 100644 --- a/vendor/github.com/gorilla/handlers/handlers.go +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -35,7 +35,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(allow) w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -44,7 +44,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP -// status code and body size +// status code and body size. type responseLogger struct { w http.ResponseWriter status int @@ -97,7 +97,7 @@ func isContentType(h http.Header, contentType string) bool { // Only PUT, POST, and PATCH requests are considered. 
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { + if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { h.ServeHTTP(w, r) return } @@ -108,7 +108,10 @@ func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return } } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", + r.Header.Get("Content-Type"), + contentTypes), + http.StatusUnsupportedMediaType) }) } @@ -133,12 +136,12 @@ const ( // Form method takes precedence over header method. func HTTPMethodOverrideHandler(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { + if r.Method == http.MethodPost { om := r.FormValue(HTTPMethodOverrideFormKey) if om == "" { om = r.Header.Get(HTTPMethodOverrideHeader) } - if om == "PUT" || om == "PATCH" || om == "DELETE" { + if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { r.Method = om } } diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go index 228465eba00..2badb6fbff8 100644 --- a/vendor/github.com/gorilla/handlers/logging.go +++ b/vendor/github.com/gorilla/handlers/logging.go @@ -18,7 +18,7 @@ import ( // Logging -// LogFormatterParams is the structure any formatter will be handed when time to log comes +// LogFormatterParams is the structure any formatter will be handed when time to log comes. type LogFormatterParams struct { Request *http.Request URL url.URL @@ -27,7 +27,7 @@ type LogFormatterParams struct { Size int } -// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler +// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. type LogFormatter func(writer io.Writer, params LogFormatterParams) // loggingHandler is the http.Handler implementation for LoggingHandlerTo and its @@ -46,7 +46,10 @@ func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { h.handler.ServeHTTP(w, req) if req.MultipartForm != nil { - req.MultipartForm.RemoveAll() + err := req.MultipartForm.RemoveAll() + if err != nil { + return + } } params := LogFormatterParams{ @@ -76,7 +79,7 @@ const lowerhex = "0123456789abcdef" func appendQuoted(buf []byte, s string) []byte { var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { + for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1 r := rune(s[0]) width = 1 if r >= utf8.RuneSelf { @@ -191,7 +194,7 @@ func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int func writeLog(writer io.Writer, params LogFormatterParams) { buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) buf = append(buf, '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. @@ -204,7 +207,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { buf = append(buf, `" "`...) 
buf = appendQuoted(buf, params.Request.UserAgent()) buf = append(buf, '"', '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in @@ -212,7 +215,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { // // See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. // -// LoggingHandler always sets the ident field of the log to - +// LoggingHandler always sets the ident field of the log to -. func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeCombinedLog} } @@ -226,19 +229,18 @@ func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) func LoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeLog} } // CustomLoggingHandler provides a way to supply a custom log formatter -// while taking advantage of the mechanisms in this package +// while taking advantage of the mechanisms in this package. func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { return loggingHandler{out, h, f} } diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go index ed939dcef5d..281d753e95a 100644 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -18,7 +18,7 @@ var ( var ( // RFC7239 defines a new "Forwarded: " header designed to replace the // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. forwarded = http.CanonicalHeaderKey("Forwarded") // Allows for a sub-match of the first value after 'for=' to the next // comma, semi-colon or space. The match is case-insensitive. @@ -67,7 +67,9 @@ func ProxyHeaders(h http.Handler) http.Handler { func getIP(r *http.Request) string { var addr string - if fwd := r.Header.Get(xForwardedFor); fwd != "" { + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) // Only grab the first (client) address. Note that '192.168.0.1, // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after // the first may represent forwarding proxies earlier in the chain. @@ -76,17 +78,15 @@ func getIP(r *http.Request) string { s = len(fwd) } addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": // match should contain at least two elements if the protocol was // specified in the Forwarded header. The first element will always be // the 'for=' capture, which we ignore. 
In the case of multiple IP // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { // IPv6 addresses in Forwarded headers are quoted-strings. We strip // these quotes. addr = strings.Trim(match[1], `"`) diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go index 4c4c1d9c6ce..0d4f955ecbd 100644 --- a/vendor/github.com/gorilla/handlers/recovery.go +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -36,12 +36,12 @@ func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// panic("Unexpected error!") -// }) +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) // -// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { r := &recoveryHandler{handler: h} @@ -50,20 +50,22 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { } // RecoveryLogger is a functional option to override -// the default logger +// the default logger. func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? r.logger = logger } } // PrintRecoveryStack is a functional option to enable // or disable printing stack traces on panic. -func PrintRecoveryStack(print bool) RecoveryOption { +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) - r.printStack = print + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint } } diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 00000000000..c6b74c3e0d0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 00000000000..84039fec687 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee59..00000000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. 
- -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 6903df6386e..bb9d80bc9b6 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 00000000000..98f5ab75f9d --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 35eea9f1063..382513d57c4 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,12 +1,12 @@ # gorilla/mux -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) +![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) +[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) -https://www.gorillatoolkit.org/pkg/mux +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to their respective handler. @@ -247,32 +247,25 @@ type spaHandler struct { // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) + // Join internally call path.Clean to prevent directory traversal + path := filepath.Join(h.staticPath, r.URL.Path) - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html + // check whether a file exists or is a directory at the given path + fi, err := os.Stat(path) + if os.IsNotExist(err) || fi.IsDir() { + // file does not exist or path is a directory, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop + } + + if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) - return + return } - // otherwise, use http.FileServer to serve the static dir + // otherwise, use http.FileServer to serve the static file http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } @@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news", "id", "42") ``` +To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: +```go +r := mux.NewRouter() +r.Host("{domain}"). + Path("/{group}/{item_id}"). + Queries("some_data1", "{some_data1}"). + Queries("some_data2", "{some_data2}"). + Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` ### Walking Routes The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, @@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler r := mux.NewRouter() r.HandleFunc("/", handler) -amw := authenticationMiddleware{} +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) @@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) { rr := httptest.NewRecorder() - // Need to create a router that we can pass the request through so that the vars will be added to the context + // To add the vars to the context, + // we need to create a router through which we can pass the request. router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) router.ServeHTTP(rr, req) diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index bd5a38b55d8..80601351fd0 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. 
- * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. Let's start registering a couple of URL paths and handlers: @@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 782a34b22a6..1e089906fad 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -31,24 +31,26 @@ func NewRouter() *Router { // It implements the http.Handler interface, so it can be registered to serve // requests: // -// var router = mux.NewRouter() +// var router = mux.NewRouter() // -// func main() { -// http.Handle("/", router) -// } +// func main() { +// http.Handle("/", router) +// } // // Or, for Google App Engine, register it in a init() function: // -// func init() { -// http.Handle("/", router) -// } +// func init() { +// http.Handle("/", router) +// } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. NotFoundHandler http.Handler // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. MethodNotAllowedHandler http.Handler // Routes to be matched, in order. 
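The mux.go hunk above documents that `NotFoundHandler` and `MethodNotAllowedHandler` can be set to render custom 404/405 responses, and the README hunk fixes the middleware example to initialise its `tokenUsers` map. A minimal sketch of how a consumer of the vendored router might wire these fields up; the paths, handler bodies, and listen address are illustrative, not part of the patch:

```go
// Illustrative only: uses the NotFoundHandler/MethodNotAllowedHandler fields
// documented in the hunk above. Routes and messages are hypothetical.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodGet)

	// Render custom 404/405 responses instead of the net/http defaults.
	r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "no such route", http.StatusNotFound)
	})
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "method not allowed on this route", http.StatusMethodNotAllowed)
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```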
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 0144842bb23..5d05cfa0e9e 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -22,10 +22,10 @@ type routeRegexpOptions struct { type regexpType int const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery ) // newRouteRegexp parses a route template and returns a routeRegexp, @@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 750afe570d0..e8f11df221f 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { match.MatchErr = nil } - matchErr = nil + matchErr = nil // nolint:ineffassign return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } } } @@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. For example: // -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // If the value is an empty string, it will match any value if the key is set. @@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { // HeadersRegexp accepts a sequence of key/value pairs, where the value has regex // support. For example: // -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both the request header matches both regular expressions. // If the value is an empty string, it will match any value if the key is set. 
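The comment added to `Route.Match` above explains how a method mismatch is reconciled when several routes share a path but differ in method and variable patterns. A small sketch of that exact scenario; the `httptest` scaffolding and the printed status are assumptions for illustration, the routes and expected outcome are taken from the new comment:

```go
// Sketch of the scenario described in the comment added above: two routes on
// /users/{id}, one POST without a pattern and one GET restricted to [0-9]+.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/users/{id}", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusCreated)
	}).Methods(http.MethodPost)
	r.HandleFunc("/users/{id:[0-9]+}", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodGet)

	// "abc" does not satisfy [0-9]+ and the POST route does not accept GET,
	// so with this change the router should answer 404 rather than 405.
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/users/abc", nil))
	fmt.Println(rec.Code) // expected: 404
}
```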
@@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route { // It accepts a sequence of key/value pairs. Values may define variables. // For example: // -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. @@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // // It will test the inner routes only if the parent route matched. For example: // -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. @@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router { // It accepts a sequence of key/value pairs for the route variables. For // example, given this route: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") // // ...a URL for it can be built using: // -// url, err := r.Get("article").URL("category", "technology", "id", "42") +// url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return an url.URL with the following path: // -// "/articles/technology/42" +// "/articles/technology/42" // // This also works for host variables: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). 
+// Name("article") // -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") // // The scheme of the resulting url will be the first argument that was passed to Schemes: // -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. @@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) { return r.regexp.host.template, nil } +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 00000000000..2940ec92ac2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index cd3fcd1ef72..84039fec687 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -1,25 +1 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.idea/ -*.iml +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 00000000000..34882139e1f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS deleted file mode 100644 index 1931f400682..00000000000 --- a/vendor/github.com/gorilla/websocket/AUTHORS +++ /dev/null @@ -1,9 +0,0 @@ -# This is the official list of Gorilla WebSocket authors for copyright 
-# purposes. -# -# Please keep the list sorted. - -Gary Burd -Google LLC (https://opensource.google.com/) -Joachim Bauch - diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE index 9171c972252..bb9d80bc9b6 100644 --- a/vendor/github.com/gorilla/websocket/LICENSE +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
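Looking back at the `route.go` documentation hunks and the new `GetVarNames()` helper a few hunks above: a short sketch combining the two, with the route template and values borrowed from those doc comments (the surrounding `main` scaffolding is an assumption):

```go
// Builds a URL from a named route and lists the variables URL() requires,
// using the GetVarNames() method introduced by this patch.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", func(http.ResponseWriter, *http.Request) {}).
		Host("{subdomain}.domain.com").
		Name("article")

	// GetVarNames reports every variable that URL() will require for the route.
	names, err := r.Get("article").GetVarNames()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names) // [subdomain category id]

	u, err := r.Get("article").URL("subdomain", "news", "category", "technology", "id", "42")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.String()) // http://news.domain.com/articles/technology/42
}
```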
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 00000000000..603a63f50a3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 2517a28715f..1fd5e9c4e79 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -1,17 +1,14 @@ -# Gorilla WebSocket +# gorilla/websocket -[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) -[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) -Gorilla WebSocket is a [Go](http://golang.org/) implementation of the -[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) ---- - -⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** - ---- ### Documentation @@ -20,6 +17,7 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the * [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) * [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) ### Status @@ -36,4 +34,3 @@ package API is stable. 
The Gorilla WebSocket package passes the server tests in the [Autobahn Test Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). - diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 2efd83555d3..815b0ca5c8f 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -9,14 +9,18 @@ import ( "context" "crypto/tls" "errors" + "fmt" "io" - "io/ioutil" + "log" + "net" "net/http" "net/http/httptrace" "net/url" "strings" "time" + + "golang.org/x/net/proxy" ) // ErrBadHandshake is returned when the server response to opening handshake is @@ -224,6 +228,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) @@ -289,7 +294,9 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } err = c.SetDeadline(deadline) if err != nil { - c.Close() + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } return c, nil @@ -303,7 +310,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h return nil, nil, err } if proxyURL != nil { - dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) if err != nil { return nil, nil, err } @@ -318,18 +325,20 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h } netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } if trace != nil && trace.GotConn != nil { trace.GotConn(httptrace.GotConnInfo{ Conn: netConn, }) } - if err != nil { - return nil, nil, err - } defer func() { if netConn != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } } }() @@ -370,6 +379,17 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h resp, err := http.ReadResponse(conn.br, req) if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } return nil, nil, err } @@ -388,7 +408,7 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h // debugging. 
buf := make([]byte, 1024) n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) return nil, resp, ErrBadHandshake } @@ -406,17 +426,19 @@ func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader h break } - resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } netConn = nil // to avoid close in defer. return conn, resp, nil } func cloneTLSConfig(cfg *tls.Config) *tls.Config { if cfg == nil { - return &tls.Config{} + return &tls.Config{MinVersion: tls.VersionTLS12} } return cfg.Clone() } diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go index 813ffb1e843..9fed0ef521c 100644 --- a/vendor/github.com/gorilla/websocket/compression.go +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -8,6 +8,7 @@ import ( "compress/flate" "errors" "io" + "log" "strings" "sync" ) @@ -33,7 +34,9 @@ func decompressNoContextTakeover(r io.Reader) io.ReadCloser { "\x01\x00\x00\xff\xff" fr, _ := flateReaderPool.Get().(io.ReadCloser) - fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } return &flateReadWrapper{fr} } @@ -132,7 +135,9 @@ func (r *flateReadWrapper) Read(p []byte) (int, error) { // Preemptively place the reader back in the pool. This helps with // scenarios where the application does not call NextReader() soon after // this final read. - r.Close() + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } } return n, err } diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index 331eebc8500..221e6cf7988 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -6,11 +6,11 @@ package websocket import ( "bufio" + "crypto/rand" "encoding/binary" "errors" "io" - "io/ioutil" - "math/rand" + "log" "net" "strconv" "strings" @@ -181,13 +181,20 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k } func hideTempErr(err error) error { - if e, ok := err.(net.Error); ok && e.Temporary() { + if e, ok := err.(net.Error); ok { err = &netError{msg: e.Error(), timeout: e.Timeout()} } return err @@ -372,7 +379,9 @@ func (c *Conn) read(n int) ([]byte, error) { if err == io.EOF { err = errUnexpectedEOF } - c.br.Discard(len(p)) + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } return p, err } @@ -387,7 +396,9 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } if len(buf1) == 0 { _, err = c.conn.Write(buf0) } else { @@ -397,7 +408,7 @@ func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error return c.writeFatal(err) } if frameType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return nil } @@ -438,7 +449,7 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er d := 1000 * time.Hour if !deadline.IsZero() { - d = deadline.Sub(time.Now()) + d = time.Until(deadline) if d < 0 { return errWriteTimeout } @@ -460,13 +471,15 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er return err } - c.conn.SetWriteDeadline(deadline) + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } _, err = c.conn.Write(buf) if err != nil { return c.writeFatal(err) } if messageType == CloseMessage { - c.writeFatal(ErrCloseSent) + _ = c.writeFatal(ErrCloseSent) } return err } @@ -477,7 +490,9 @@ func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - c.writer.Close() + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } c.writer = nil } @@ -630,7 +645,7 @@ func (w *messageWriter) flushFrame(final bool, extra []byte) error { } if final { - w.endMessage(errWriteClosed) + _ = w.endMessage(errWriteClosed) return nil } @@ -795,7 +810,7 @@ func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. 
if c.readRemaining > 0 { - if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { return noFrame, err } } @@ -817,7 +832,9 @@ func (c *Conn) advanceFrame() (int, error) { rsv2 := p[0]&rsv2Bit != 0 rsv3 := p[0]&rsv3Bit != 0 mask := p[1]&maskBit != 0 - c.setReadRemaining(int64(p[1] & 0x7f)) + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } c.readDecompress = false if rsv1 { @@ -922,7 +939,9 @@ func (c *Conn) advanceFrame() (int, error) { } if c.readLimit > 0 && c.readLength > c.readLimit { - c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } return noFrame, ErrReadLimit } @@ -934,7 +953,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { payload, err = c.read(int(c.readRemaining)) - c.setReadRemaining(0) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } if err != nil { return noFrame, err } @@ -981,7 +1002,9 @@ func (c *Conn) handleProtocolError(message string) error { if len(data) > maxControlFramePayloadSize { data = data[:maxControlFramePayloadSize] } - c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil { + return err + } return errors.New("websocket: " + message) } @@ -998,7 +1021,9 @@ func (c *Conn) handleProtocolError(message string) error { func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { // Close previous reader, only relevant for decompression. if c.reader != nil { - c.reader.Close() + if err := c.reader.Close(); err != nil { + log.Printf("websocket: discarding reader close error: %v", err) + } c.reader = nil } @@ -1054,7 +1079,9 @@ func (r *messageReader) Read(b []byte) (int, error) { } rem := c.readRemaining rem -= int64(n) - c.setReadRemaining(rem) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } if c.readRemaining > 0 && c.readErr == io.EOF { c.readErr = errUnexpectedEOF } @@ -1094,7 +1121,7 @@ func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { if err != nil { return messageType, nil, err } - p, err = ioutil.ReadAll(r) + p, err = io.ReadAll(r) return messageType, p, err } @@ -1136,7 +1163,9 @@ func (c *Conn) SetCloseHandler(h func(code int, text string) error) { if h == nil { h = func(code int, text string) error { message := FormatCloseMessage(code, "") - c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil { + return err + } return nil } } @@ -1161,7 +1190,7 @@ func (c *Conn) SetPingHandler(h func(appData string) error) { err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) if err == ErrCloseSent { return nil - } else if e, ok := err.(net.Error); ok && e.Temporary() { + } else if _, ok := err.(net.Error); ok { return nil } return err @@ -1189,8 +1218,16 @@ func (c *Conn) SetPongHandler(h func(appData string) error) { c.handlePong = h } +// NetConn returns the underlying connection that is wrapped by c. +// Note that writing to or reading from this connection directly will corrupt the +// WebSocket connection. 
+func (c *Conn) NetConn() net.Conn { + return c.conn +} + // UnderlyingConn returns the internal net.Conn. This can be used to further // modifications to connection specific flags. +// Deprecated: Use the NetConn method. func (c *Conn) UnderlyingConn() net.Conn { return c.conn } diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go index d0742bf2a55..67d0968be83 100644 --- a/vendor/github.com/gorilla/websocket/mask.go +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -9,6 +9,7 @@ package websocket import "unsafe" +// #nosec G103 -- (CWE-242) Has been audited const wordSize = int(unsafe.Sizeof(uintptr(0))) func maskBytes(key [4]byte, pos int, b []byte) int { @@ -22,6 +23,7 @@ func maskBytes(key [4]byte, pos int, b []byte) int { } // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { n = wordSize - n for i := range b[:n] { @@ -36,11 +38,13 @@ func maskBytes(key [4]byte, pos int, b []byte) int { for i := range k { k[i] = key[(pos+i)&3] } + //#nosec G103 -- (CWE-242) Has been audited kw := *(*uintptr)(unsafe.Pointer(&k)) // Mask one word at a time. n := (len(b) / wordSize) * wordSize for i := 0; i < n; i += wordSize { + //#nosec G103 -- (CWE-242) Has been audited *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go index e0f466b72fb..80f55d1eacc 100644 --- a/vendor/github.com/gorilla/websocket/proxy.go +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -8,10 +8,13 @@ import ( "bufio" "encoding/base64" "errors" + "log" "net" "net/http" "net/url" "strings" + + "golang.org/x/net/proxy" ) type netDialerFunc func(network, addr string) (net.Conn, error) @@ -21,7 +24,7 @@ func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { } func init() { - proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) { return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil }) } @@ -55,7 +58,9 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) } if err := connectReq.Write(conn); err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } @@ -64,12 +69,16 @@ func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) br := bufio.NewReader(conn) resp, err := http.ReadResponse(br, connectReq) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } return nil, err } if resp.StatusCode != 200 { - conn.Close() + if err := conn.Close(); err != nil { + log.Printf("httpProxyDialer: failed to close connection: %v", err) + } f := strings.SplitN(resp.Status, " ", 2) return nil, errors.New(f[1]) } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index 24d53b38abe..1e720e1da47 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -8,6 +8,7 @@ import ( "bufio" "errors" "io" + "log" "net/http" "net/url" "strings" @@ -154,8 +155,8 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, 
responseHeade } challengeKey := r.Header.Get("Sec-Websocket-Key") - if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") } subprotocol := u.selectSubprotocol(r, responseHeader) @@ -183,7 +184,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade } if brw.Reader.Buffered() > 0 { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, errors.New("websocket: client sent data before handshake is complete") } @@ -248,17 +251,34 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, "\r\n"...) // Clear deadlines set by HTTP server. - netConn.SetDeadline(time.Time{}) + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } if _, err = netConn.Write(p); err != nil { - netConn.Close() + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } return nil, err } if u.HandshakeTimeout > 0 { - netConn.SetWriteDeadline(time.Time{}) + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } } return c, nil @@ -356,8 +376,12 @@ func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { // bufio.Writer's underlying writer. 
var wh writeHook bw.Reset(&wh) - bw.WriteByte(0) - bw.Flush() + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } bw.Reset(originalWriter) diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go index a62b68ccb11..7f386453481 100644 --- a/vendor/github.com/gorilla/websocket/tls_handshake.go +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -1,6 +1,3 @@ -//go:build go1.17 -// +build go1.17 - package websocket import ( diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go deleted file mode 100644 index e1b2b44f6e6..00000000000 --- a/vendor/github.com/gorilla/websocket/tls_handshake_116.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -package websocket - -import ( - "context" - "crypto/tls" -) - -func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { - if err := tlsConn.Handshake(); err != nil { - return err - } - if !cfg.InsecureSkipVerify { - if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index 7bf2f66c674..9b1a629bff4 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -6,7 +6,7 @@ package websocket import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 "encoding/base64" "io" "net/http" @@ -17,7 +17,7 @@ import ( var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { - h := sha1.New() + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 h.Write([]byte(challengeKey)) h.Write(keyGUID) return base64.StdEncoding.EncodeToString(h.Sum(nil)) @@ -281,3 +281,18 @@ headers: } return result } + +// isValidChallengeKey checks if the argument meets RFC6455 specification. +func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. + + if s == "" { + return false + } + decoded, err := base64.StdEncoding.DecodeString(s) + return err == nil && len(decoded) == 16 +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go deleted file mode 100644 index 2e668f6b882..00000000000 --- a/vendor/github.com/gorilla/websocket/x_net_proxy.go +++ /dev/null @@ -1,473 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy - -// Package proxy provides support for a variety of protocols to proxy network -// data. -// - -package websocket - -import ( - "errors" - "io" - "net" - "net/url" - "os" - "strconv" - "strings" - "sync" -) - -type proxy_direct struct{} - -// Direct is a direct proxy: one that makes network connections directly. -var proxy_Direct = proxy_direct{} - -func (proxy_direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. 
-type proxy_PerHost struct { - def, bypass proxy_Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. -func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { - return &proxy_PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *proxy_PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *proxy_PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *proxy_PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. 
-func (p *proxy_PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} - -// A Dialer is a means to establish a connection. -type proxy_Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type proxy_Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy related variables in -// the environment. -func proxy_FromEnvironment() proxy_Dialer { - allProxy := proxy_allProxyEnv.Get() - if len(allProxy) == 0 { - return proxy_Direct - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return proxy_Direct - } - proxy, err := proxy_FromURL(proxyURL, proxy_Direct) - if err != nil { - return proxy_Direct - } - - noProxy := proxy_noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := proxy_NewPerHost(proxy, proxy_Direct) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { - if proxy_proxySchemes == nil { - proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) - } - proxy_proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { - var auth *proxy_Auth - if u.User != nil { - auth = new(proxy_Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5": - return proxy_SOCKS5("tcp", u.Host, auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. - if proxy_proxySchemes != nil { - if f, ok := proxy_proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - proxy_allProxyEnv = &proxy_envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - proxy_noProxyEnv = &proxy_envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type proxy_envOnce struct { - names []string - once sync.Once - val string -} - -func (e *proxy_envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *proxy_envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address -// with an optional username and password. See RFC 1928 and RFC 1929. 
-func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { - s := &proxy_socks5{ - network: network, - addr: addr, - forward: forward, - } - if auth != nil { - s.user = auth.User - s.password = auth.Password - } - - return s, nil -} - -type proxy_socks5 struct { - user, password string - network, addr string - forward proxy_Dialer -} - -const proxy_socks5Version = 5 - -const ( - proxy_socks5AuthNone = 0 - proxy_socks5AuthPassword = 2 -) - -const proxy_socks5Connect = 1 - -const ( - proxy_socks5IP4 = 1 - proxy_socks5Domain = 3 - proxy_socks5IP6 = 4 -) - -var proxy_socks5Errors = []string{ - "", - "general failure", - "connection forbidden", - "network unreachable", - "host unreachable", - "connection refused", - "TTL expired", - "command not supported", - "address type not supported", -} - -// Dial connects to the address addr on the given network via the SOCKS5 proxy. -func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) - } - - conn, err := s.forward.Dial(s.network, s.addr) - if err != nil { - return nil, err - } - if err := s.connect(conn, addr); err != nil { - conn.Close() - return nil, err - } - return conn, nil -} - -// connect takes an existing connection to a socks5 proxy server, -// and commands the server to extend that connection to target, -// which must be a canonical address with a host and port. -func (s *proxy_socks5) connect(conn net.Conn, target string) error { - host, portStr, err := net.SplitHostPort(target) - if err != nil { - return err - } - - port, err := strconv.Atoi(portStr) - if err != nil { - return errors.New("proxy: failed to parse port number: " + portStr) - } - if port < 1 || port > 0xffff { - return errors.New("proxy: port number out of range: " + portStr) - } - - // the size here is just an estimate - buf := make([]byte, 0, 6+len(host)) - - buf = append(buf, proxy_socks5Version) - if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { - buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) - } else { - buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) - } - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - if buf[0] != 5 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) - } - if buf[1] == 0xff { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") - } - - // See RFC 1929 - if buf[1] == proxy_socks5AuthPassword { - buf = buf[:0] - buf = append(buf, 1 /* password protocol version */) - buf = append(buf, uint8(len(s.user))) - buf = append(buf, s.user...) - buf = append(buf, uint8(len(s.password))) - buf = append(buf, s.password...) 
- - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if buf[1] != 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") - } - } - - buf = buf[:0] - buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) - - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - buf = append(buf, proxy_socks5IP4) - ip = ip4 - } else { - buf = append(buf, proxy_socks5IP6) - } - buf = append(buf, ip...) - } else { - if len(host) > 255 { - return errors.New("proxy: destination host name too long: " + host) - } - buf = append(buf, proxy_socks5Domain) - buf = append(buf, byte(len(host))) - buf = append(buf, host...) - } - buf = append(buf, byte(port>>8), byte(port)) - - if _, err := conn.Write(buf); err != nil { - return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - if _, err := io.ReadFull(conn, buf[:4]); err != nil { - return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - failure := "unknown error" - if int(buf[1]) < len(proxy_socks5Errors) { - failure = proxy_socks5Errors[buf[1]] - } - - if len(failure) > 0 { - return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) - } - - bytesToDiscard := 0 - switch buf[3] { - case proxy_socks5IP4: - bytesToDiscard = net.IPv4len - case proxy_socks5IP6: - bytesToDiscard = net.IPv6len - case proxy_socks5Domain: - _, err := io.ReadFull(conn, buf[:1]) - if err != nil { - return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - bytesToDiscard = int(buf[0]) - default: - return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) - } - - if cap(buf) < bytesToDiscard { - buf = make([]byte, bytesToDiscard) - } else { - buf = buf[:bytesToDiscard] - } - if _, err := io.ReadFull(conn, buf); err != nil { - return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - // Also need to discard the port number - if _, err := io.ReadFull(conn, buf[:2]); err != nil { - return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) - } - - return nil -} diff --git a/vendor/github.com/longhorn/backing-image-manager/api/types.go b/vendor/github.com/longhorn/backing-image-manager/api/types.go index 5446553fcb3..905316ad308 100644 --- a/vendor/github.com/longhorn/backing-image-manager/api/types.go +++ b/vendor/github.com/longhorn/backing-image-manager/api/types.go @@ -13,6 +13,7 @@ type BackingImage struct { Name string `json:"name"` UUID string `json:"uuid"` Size int64 `json:"size"` + VirtualSize int64 `json:"virtualSize"` ExpectedChecksum string `json:"expectedChecksum"` Status BackingImageStatus `json:"status"` @@ -32,6 +33,7 @@ func RPCToBackingImage(obj *rpc.BackingImageResponse) *BackingImage { Name: obj.Spec.Name, UUID: obj.Spec.Uuid, Size: obj.Spec.Size, + VirtualSize: obj.Spec.VirtualSize, ExpectedChecksum: obj.Spec.Checksum, Status: BackingImageStatus{ @@ -112,6 +114,7 @@ type FileInfo struct { FilePath string `json:"filePath"` UUID string `json:"uuid"` 
Size int64 `json:"size"` + VirtualSize int64 `json:"virtualSize"` State string `json:"state"` Progress int `json:"progress"` ProcessedSize int64 `json:"processedSize"` diff --git a/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.pb.go b/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.pb.go index b3012f62070..c237c6c84dd 100644 --- a/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.pb.go +++ b/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v4.24.3 +// protoc v4.25.1 // source: github.com/longhorn/backing-image-manager/pkg/rpc/rpc.proto package rpc @@ -30,10 +30,11 @@ type BackingImageSpec struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` - Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Checksum string `protobuf:"bytes,4,opt,name=checksum,proto3" json:"checksum,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Checksum string `protobuf:"bytes,4,opt,name=checksum,proto3" json:"checksum,omitempty"` + VirtualSize int64 `protobuf:"varint,5,opt,name=virtualSize,proto3" json:"virtualSize,omitempty"` } func (x *BackingImageSpec) Reset() { @@ -96,6 +97,13 @@ func (x *BackingImageSpec) GetChecksum() string { return "" } +func (x *BackingImageSpec) GetVirtualSize() int64 { + if x != nil { + return x.VirtualSize + } + return 0 +} + type BackingImageStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -989,225 +997,227 @@ var file_github_com_longhorn_backing_image_manager_pkg_rpc_rpc_proto_rawDesc = [ 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x6a, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, - 0x53, 0x70, 0x65, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x22, 0xe2, 0x01, 0x0a, - 0x12, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 
0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x10, 0x73, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x22, 0xb4, 0x01, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x04, 0x73, 0x70, - 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, - 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x70, 0x65, 0x63, - 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x50, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, - 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, - 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x37, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, - 0x64, 0x22, 0x34, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0e, 0x62, 0x61, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x45, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x1a, 0x7c, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, - 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x50, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, - 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x82, 0x02, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x48, - 0x0a, 0x21, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x62, 0x61, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, 0x70, - 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x25, 0x62, 0x61, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, 0x70, 0x69, 0x4d, - 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0b, 0x53, 0x79, 0x6e, - 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, - 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, - 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, - 0x73, 0x70, 0x65, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1d, - 0x0a, 0x0a, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x8a, 0x01, - 0x0a, 0x0c, 0x46, 
0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, - 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6c, - 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, - 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, - 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x40, 0x0a, 0x16, 0x50, 0x72, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x57, 0x0a, 0x17, - 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x72, 0x63, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x73, 0x72, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x69, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, - 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, - 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 
0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x29, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x7d, 0x0a, - 0x14, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x32, 0xa7, 0x09, 0x0a, - 0x1a, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x57, 0x0a, 0x06, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x33, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, - 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x30, 0x2e, 0x6c, 0x6f, - 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, - 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x04, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x32, 0x2e, 0x6c, 0x6f, + 0x22, 0x8c, 0x01, 0x0a, 0x10, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 
0x67, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x20, 0x0a, + 0x0b, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, + 0xe2, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x73, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x4d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x22, 0xb4, 0x01, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, + 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x12, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x35, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, - 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, - 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x31, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, - 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 
- 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x79, - 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, + 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x50, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x04, 0x53, 0x65, 0x6e, 0x64, 0x12, - 0x31, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, - 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x05, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x12, 0x32, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, - 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, - 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x90, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3c, 0x2e, 0x6c, 0x6f, - 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x37, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x22, 0x34, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x0c, 0x4c, + 0x69, 0x73, 0x74, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0e, 0x62, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x62, 0x61, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x1a, 0x7c, 0x0a, 0x12, 0x42, 0x61, 0x63, + 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x50, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x82, 0x02, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, + 0x65, 0x12, 0x48, 0x0a, 0x21, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x62, 0x61, + 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x25, 0x62, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x6d, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x20, 0x62, 0x61, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x41, + 0x70, 0x69, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x0a, 0x0b, + 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x04, 0x73, + 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, - 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 
0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x0c, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x6c, 0x6f, 0x6e, + 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x70, 0x65, + 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, + 0x72, 0x6f, 0x6d, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x54, 0x0a, 0x0b, 0x53, 0x65, + 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x22, 0x8a, 0x01, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4a, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x36, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, + 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, + 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x2e, 0x0a, + 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x40, 0x0a, + 0x16, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, + 0x57, 0x0a, 0x17, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x72, + 0x63, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x72, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 
0x70, 0x5f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, + 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2d, 0x0a, 0x12, + 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x29, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x7d, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x32, + 0xa7, 0x09, 0x0a, 0x1a, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x57, + 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x33, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, + 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x30, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, + 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x3a, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, + 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, + 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x32, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, + 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, + 0x65, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x35, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, - 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x63, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x77, 0x0a, 0x04, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x31, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x04, 0x53, 0x65, + 0x6e, 0x64, 0x12, 0x31, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, - 0x87, 0x01, 0x0a, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x39, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, - 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 
0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, - 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6c, 0x6f, - 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x62, 0x61, - 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x2d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x79, 0x0a, 0x05, 0x46, 0x65, 0x74, 0x63, 0x68, 0x12, 0x32, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, + 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x90, 0x01, 0x0a, 0x0f, 0x50, + 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3c, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, + 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, + 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x6f, 0x77, 0x6e, 0x6c, + 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, + 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, + 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x87, 0x01, 0x0a, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x39, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x2e, 0x70, 0x6b, 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, + 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, + 0x67, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x6b, + 0x67, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x05, + 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, + 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x2d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2d, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x70, 0x63, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.proto b/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.proto index 9dbf137bf53..10ed98abfb0 100644 --- a/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.proto +++ b/vendor/github.com/longhorn/backing-image-manager/pkg/rpc/rpc.proto @@ -26,6 +26,7 @@ message BackingImageSpec { string uuid = 2; int64 size = 3; string checksum = 4; + int64 virtualSize = 5; } message BackingImageStatus { diff --git a/vendor/github.com/longhorn/backing-image-manager/pkg/util/util.go b/vendor/github.com/longhorn/backing-image-manager/pkg/util/util.go index f46bc5e36ef..30cff9df511 100644 --- a/vendor/github.com/longhorn/backing-image-manager/pkg/util/util.go +++ b/vendor/github.com/longhorn/backing-image-manager/pkg/util/util.go @@ -1,7 +1,6 @@ package util import ( - "bufio" "bytes" "compress/gzip" "context" @@ -15,7 +14,6 @@ import ( "os/exec" "path/filepath" "regexp" - "strings" "time" "github.com/pkg/errors" @@ -152,6 +150,7 @@ type SyncingFileConfig struct { FilePath string `json:"name"` UUID string `json:"uuid"` Size int64 `json:"size"` + VirtualSize int64 `json:"virtualSize"` ExpectedChecksum string `json:"expectedChecksum"` CurrentChecksum string `json:"currentChecksum"` ModificationTime string `json:"modificationTime"` @@ -232,48 +231,60 @@ func ExecuteWithTimeout(timeout time.Duration, envs []string, binary string, arg return output.String(), nil } -func DetectFileFormat(filePath string) (string, error) { +type QemuImgInfo struct { + // For qcow2 files, VirtualSize may be larger than the physical + // image size on disk. For raw files, `qemu-img info` will report + // VirtualSize as being the same as the physical file size. 
+ VirtualSize int64 `json:"virtual-size"` + Format string `json:"format"` +} + +func GetQemuImgInfo(filePath string) (imgInfo QemuImgInfo, err error) { /* Example command outputs - $ qemu-img info parrot.raw - image: parrot.raw - file format: raw - virtual size: 32M (33554432 bytes) - disk size: 2.2M - - $ qemu-img info parrot.qcow2 - image: parrot.qcow2 - file format: qcow2 - virtual size: 32M (33554432 bytes) - disk size: 2.3M - cluster_size: 65536 - Format specific information: - compat: 1.1 - lazy refcounts: false - refcount bits: 16 - corrupt: false + $ qemu-img info --output=json SLE-Micro.x86_64-5.5.0-Default-qcow-GM.qcow2 + { + "virtual-size": 21474836480, + "filename": "SLE-Micro.x86_64-5.5.0-Default-qcow-GM.qcow2", + "cluster-size": 65536, + "format": "qcow2", + "actual-size": 1001656320, + "format-specific": { + "type": "qcow2", + "data": { + "compat": "1.1", + "compression-type": "zlib", + "lazy-refcounts": false, + "refcount-bits": 16, + "corrupt": false, + "extended-l2": false + } + }, + "dirty-flag": false + } + + $ qemu-img info --output=json SLE-15-SP5-Full-x86_64-GM-Media1.iso + { + "virtual-size": 14548992000, + "filename": "SLE-15-SP5-Full-x86_64-GM-Media1.iso", + "format": "raw", + "actual-size": 14548996096, + "dirty-flag": false + } */ - output, err := Execute([]string{}, QemuImgBinary, "info", filePath) + output, err := Execute([]string{}, QemuImgBinary, "info", "--output=json", filePath) if err != nil { - return "", err + return } - - scanner := bufio.NewScanner(strings.NewReader(output)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, "file format: ") { - return strings.TrimPrefix(line, "file format: "), nil - } - } - - return "", fmt.Errorf("cannot find the file format in the output %s", output) + err = json.Unmarshal([]byte(output), &imgInfo) + return } func ConvertFromRawToQcow2(filePath string) error { - if format, err := DetectFileFormat(filePath); err != nil { + if imgInfo, err := GetQemuImgInfo(filePath); err != nil { return err - } else if format == "qcow2" { + } else if imgInfo.Format == "qcow2" { return nil } diff --git a/vendor/github.com/longhorn/backupstore/config.go b/vendor/github.com/longhorn/backupstore/config.go index 35cd5a52584..a996ba0dbc0 100644 --- a/vendor/github.com/longhorn/backupstore/config.go +++ b/vendor/github.com/longhorn/backupstore/config.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "path/filepath" + "runtime" "strings" "time" @@ -139,6 +140,27 @@ func volumeExists(driver BackupStoreDriver, volumeName string) bool { return driver.FileExists(getVolumeFilePath(volumeName)) } +// volumeFolderExists checks if volume folder exists on backupstore +// by listing all the backup volume name based on the folders on the backupstore +// since s3 does not support checking folder exist. 
+func volumeFolderExists(driver BackupStoreDriver, volumeName string) (bool, error) { + jobQueues := workerpool.New(runtime.NumCPU() * 16) + defer jobQueues.StopWait() + + volumeNames, err := getVolumeNames(jobQueues, driver) + if err != nil { + return false, err + } + + for _, name := range volumeNames { + if volumeName == name { + return true, nil + } + } + + return false, nil +} + func getVolumePath(volumeName string) string { checksum := util.GetChecksum([]byte(volumeName)) volumeLayer1 := checksum[0:VOLUME_SEPARATE_LAYER1] diff --git a/vendor/github.com/longhorn/backupstore/deltablock.go b/vendor/github.com/longhorn/backupstore/deltablock.go index d011b3a2a00..73851e53b54 100644 --- a/vendor/github.com/longhorn/backupstore/deltablock.go +++ b/vendor/github.com/longhorn/backupstore/deltablock.go @@ -1051,6 +1051,17 @@ func DeleteBackupVolume(volumeName string, destURL string) error { if err != nil { return err } + + backupVolumeFolderExists, err := volumeFolderExists(bsDriver, volumeName) + if err != nil { + return err + } + + // No need to lock and remove volume if it does not exist. + if !backupVolumeFolderExists { + return nil + } + lock, err := New(bsDriver, volumeName, DELETION_LOCK) if err != nil { return err diff --git a/vendor/github.com/longhorn/backupstore/renovate.json b/vendor/github.com/longhorn/backupstore/renovate.json new file mode 100644 index 00000000000..c297cdcff13 --- /dev/null +++ b/vendor/github.com/longhorn/backupstore/renovate.json @@ -0,0 +1,3 @@ +{ + "extends": ["github>longhorn/release:renovate-default"] +} diff --git a/vendor/github.com/longhorn/go-common-libs/io/file.go b/vendor/github.com/longhorn/go-common-libs/io/file.go index 957b2d7e622..945be4d7861 100644 --- a/vendor/github.com/longhorn/go-common-libs/io/file.go +++ b/vendor/github.com/longhorn/go-common-libs/io/file.go @@ -233,6 +233,7 @@ func GetDiskStat(path string) (diskStat types.DiskStat, err error) { DiskID: fsidFormatted, Path: path, Type: usage.Fstype, + Driver: types.DiskDriverNone, FreeBlocks: int64(statfs.Bfree), TotalBlocks: int64(statfs.Blocks), BlockSize: statfs.Bsize, diff --git a/vendor/github.com/longhorn/go-common-libs/ns/crypto.go b/vendor/github.com/longhorn/go-common-libs/ns/crypto.go index d8988ddaf53..5638b37dde5 100644 --- a/vendor/github.com/longhorn/go-common-libs/ns/crypto.go +++ b/vendor/github.com/longhorn/go-common-libs/ns/crypto.go @@ -65,5 +65,5 @@ func (nsexec *Executor) CryptsetupWithPassphrase(passphrase string, args []strin // For Talos Linux, cryptsetup comes pre-installed in the host namespace // (ref: https://github.com/siderolabs/pkgs/blob/release-1.4/reproducibility/pkg.yaml#L10) // for the [Disk Encryption](https://www.talos.dev/v1.4/talos-guides/configuration/disk-encryption/). - return nsexec.ExecuteWithStdin(types.BinaryCryptsetup, args, passphrase, timeout) + return nsexec.ExecuteWithStdin(nil, types.BinaryCryptsetup, args, passphrase, timeout) } diff --git a/vendor/github.com/longhorn/go-common-libs/ns/executor.go b/vendor/github.com/longhorn/go-common-libs/ns/executor.go index 617f21e0ae2..a850e7710cc 100644 --- a/vendor/github.com/longhorn/go-common-libs/ns/executor.go +++ b/vendor/github.com/longhorn/go-common-libs/ns/executor.go @@ -46,7 +46,7 @@ func NewNamespaceExecutor(processName, procDirectory string, namespaces []types. } // prepareCommandArgs prepares the nsenter command arguments. 
-func (nsexec *Executor) prepareCommandArgs(binary string, args []string) []string { +func (nsexec *Executor) prepareCommandArgs(binary string, args, envs []string) []string { cmdArgs := []string{} for _, ns := range nsexec.namespaces { nsPath := filepath.Join(nsexec.nsDirectory, ns.String()) @@ -59,24 +59,29 @@ func (nsexec *Executor) prepareCommandArgs(binary string, args []string) []strin cmdArgs = append(cmdArgs, "--net="+nsPath) } } + if len(envs) > 0 { + cmdArgs = append(cmdArgs, "env", "-i") + cmdArgs = append(cmdArgs, envs...) + } + cmdArgs = append(cmdArgs, binary) return append(cmdArgs, args...) } // Execute executes the command in the namespace. If NsDirectory is empty, // it will execute the command in the current namespace. -func (nsexec *Executor) Execute(binary string, args []string, timeout time.Duration) (string, error) { - return nsexec.executor.Execute(nil, types.NsBinary, nsexec.prepareCommandArgs(binary, args), timeout) +func (nsexec *Executor) Execute(envs []string, binary string, args []string, timeout time.Duration) (string, error) { + return nsexec.executor.Execute(nil, types.NsBinary, nsexec.prepareCommandArgs(binary, args, envs), timeout) } // ExecuteWithStdin executes the command in the namespace with stdin. // If NsDirectory is empty, it will execute the command in the current namespace. -func (nsexec *Executor) ExecuteWithStdin(binary string, args []string, stdinString string, timeout time.Duration) (string, error) { - return nsexec.executor.ExecuteWithStdin(types.NsBinary, nsexec.prepareCommandArgs(binary, args), stdinString, timeout) +func (nsexec *Executor) ExecuteWithStdin(envs []string, binary string, args []string, stdinString string, timeout time.Duration) (string, error) { + return nsexec.executor.ExecuteWithStdin(types.NsBinary, nsexec.prepareCommandArgs(binary, args, envs), stdinString, timeout) } // ExecuteWithStdinPipe executes the command in the namespace with stdin pipe. // If NsDirectory is empty, it will execute the command in the current namespace. -func (nsexec *Executor) ExecuteWithStdinPipe(binary string, args []string, stdinString string, timeout time.Duration) (string, error) { - return nsexec.executor.ExecuteWithStdinPipe(types.NsBinary, nsexec.prepareCommandArgs(binary, args), stdinString, timeout) +func (nsexec *Executor) ExecuteWithStdinPipe(envs []string, binary string, args []string, stdinString string, timeout time.Duration) (string, error) { + return nsexec.executor.ExecuteWithStdinPipe(types.NsBinary, nsexec.prepareCommandArgs(binary, args, envs), stdinString, timeout) } diff --git a/vendor/github.com/longhorn/go-common-libs/ns/filelock.go b/vendor/github.com/longhorn/go-common-libs/ns/filelock.go index 698cca6cc92..b0ce92c6024 100644 --- a/vendor/github.com/longhorn/go-common-libs/ns/filelock.go +++ b/vendor/github.com/longhorn/go-common-libs/ns/filelock.go @@ -40,7 +40,7 @@ func LockFile(path string) (result *os.File, err error) { // FileLock is a struct responsible for locking a file. type FileLock struct { FilePath string // The path of the file to lock. - File *os.File // The file handle aquired after successful lock. + File *os.File // The file handle acquired after successful lock. Timeout time.Duration // The maximum time to wait for lock acquisition. done chan struct{} // A channel for signaling lock release. 
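The prepareCommandArgs change in the ns/executor.go hunk above threads an optional list of environment variables into the nsenter invocation: when envs is non-empty, the command line is wrapped as `env -i VAR=... <binary> <args...>`, so the target process starts from a clean environment that contains only the supplied variables. The following is a minimal standalone sketch of that argument-building idea, not the vendored implementation; the namespace flag names, paths, and variables used here are illustrative assumptions.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// buildNsenterArgs mirrors the shape of prepareCommandArgs above: namespace
// flags first, then an optional `env -i VAR=...` wrapper, then the binary and
// its arguments. Flag names and namespace paths are illustrative only.
func buildNsenterArgs(nsDir string, namespaces, envs []string, binary string, args []string) []string {
	cmdArgs := []string{}
	for _, ns := range namespaces {
		nsPath := filepath.Join(nsDir, ns)
		switch ns {
		case "mnt":
			cmdArgs = append(cmdArgs, "--mount="+nsPath)
		case "net":
			cmdArgs = append(cmdArgs, "--net="+nsPath)
		}
	}
	if len(envs) > 0 {
		// `env -i` discards the inherited environment, so only the variables
		// passed in envs are visible to the executed binary.
		cmdArgs = append(cmdArgs, "env", "-i")
		cmdArgs = append(cmdArgs, envs...)
	}
	cmdArgs = append(cmdArgs, binary)
	return append(cmdArgs, args...)
}

func main() {
	args := buildNsenterArgs("/proc/1/ns", []string{"mnt", "net"},
		[]string{"PATH=/usr/sbin:/usr/bin"}, "qemu-img",
		[]string{"info", "--output=json", "disk.img"})
	// These arguments would then be handed to nsenter by the executor.
	fmt.Println(args)
}
```

This is also consistent with the updated call sites in this diff, such as CryptsetupWithPassphrase passing nil for envs when no environment override is needed.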
diff --git a/vendor/github.com/longhorn/go-common-libs/sys/sys.go b/vendor/github.com/longhorn/go-common-libs/sys/sys.go index ea6f029a7f7..e4e23a30b22 100644 --- a/vendor/github.com/longhorn/go-common-libs/sys/sys.go +++ b/vendor/github.com/longhorn/go-common-libs/sys/sys.go @@ -56,13 +56,13 @@ func GetOSDistro(osReleaseContent string) (string, error) { // GetSystemBlockDeviceInfo returns the block device info for the system. func GetSystemBlockDeviceInfo() (map[string]types.BlockDeviceInfo, error) { - return getSystemBlockDeviceInfo(os.ReadDir, os.ReadFile) + return getSystemBlockDeviceInfo(types.SysClassBlockDirectory, os.ReadDir, os.ReadFile) } // getSystemBlockDeviceInfo returns the block device info for the system. // It injects the readDirFn and readFileFn for testing. -func getSystemBlockDeviceInfo(readDirFn func(string) ([]os.DirEntry, error), readFileFn func(string) ([]byte, error)) (map[string]types.BlockDeviceInfo, error) { - devices, err := readDirFn(types.SysClassBlockDirectory) +func getSystemBlockDeviceInfo(sysClassBlockDirectory string, readDirFn func(string) ([]os.DirEntry, error), readFileFn func(string) ([]byte, error)) (map[string]types.BlockDeviceInfo, error) { + devices, err := readDirFn(sysClassBlockDirectory) if err != nil { return nil, err } @@ -82,12 +82,12 @@ func getSystemBlockDeviceInfo(readDirFn func(string) ([]os.DirEntry, error), rea deviceInfo := make(map[string]types.BlockDeviceInfo, len(devices)) for _, device := range devices { deviceName := device.Name() - devicePath := filepath.Join(types.SysClassBlockDirectory, deviceName, "dev") + devicePath := filepath.Join(sysClassBlockDirectory, deviceName, "dev") if _, err := os.Stat(devicePath); os.IsNotExist(err) { // If the device path does not exist, check if the device path exists in the "device" directory. // Some devices such as "nvme0cn1" created from SPDK do not have "dev" file under their sys/class/block directory. 
- alternativeDevicePath := filepath.Join(types.SysClassBlockDirectory, deviceName, "device", "dev") + alternativeDevicePath := filepath.Join(sysClassBlockDirectory, deviceName, "device", "dev") if _, altErr := os.Stat(alternativeDevicePath); os.IsNotExist(altErr) { errs := fmt.Errorf("primary error: %w; alternative error: %w", err, altErr) logrus.WithFields(logrus.Fields{ diff --git a/vendor/github.com/longhorn/go-common-libs/types/file.go b/vendor/github.com/longhorn/go-common-libs/types/file.go index 3ebb6c9e730..e67b01dd742 100644 --- a/vendor/github.com/longhorn/go-common-libs/types/file.go +++ b/vendor/github.com/longhorn/go-common-libs/types/file.go @@ -6,10 +6,25 @@ import ( var FileLockDefaultTimeout = 24 * time.Hour +type DiskDriver string + +const ( + DiskDriverNone = DiskDriver("") + DiskDriverAuto = DiskDriver("auto") + DiskDriverAio = DiskDriver("aio") + DiskDriverNvme = DiskDriver("nvme") + DiskDriverVirtioScsi = DiskDriver("virtio-scsi") + DiskDriverVirtioBlk = DiskDriver("virtio-blk") + DiskDriverVirtioPci = DiskDriver("virtio-pci") + DiskDriverUioPciGeneric = DiskDriver("uio_pci_generic") +) + type DiskStat struct { DiskID string + Name string Path string Type string + Driver DiskDriver FreeBlocks int64 TotalBlocks int64 BlockSize int64 diff --git a/vendor/github.com/longhorn/go-common-libs/utils/longhorn_naming.go b/vendor/github.com/longhorn/go-common-libs/utils/longhorn_naming.go new file mode 100644 index 00000000000..a25e0ce745a --- /dev/null +++ b/vendor/github.com/longhorn/go-common-libs/utils/longhorn_naming.go @@ -0,0 +1,11 @@ +package utils + +import ( + "regexp" +) + +// IsEngineProcess distinguish if the process is a engine process by its name. +func IsEngineProcess(processName string) bool { + // engine process name example: pvc-5a8ee916-5989-46c6-bafc-ddbf7c802499-e-0 + return regexp.MustCompile(`.+?-e-.*\d$`).MatchString(processName) +} diff --git a/vendor/github.com/longhorn/go-common-libs/utils/mount.go b/vendor/github.com/longhorn/go-common-libs/utils/mount.go new file mode 100644 index 00000000000..f234ae1a3c0 --- /dev/null +++ b/vendor/github.com/longhorn/go-common-libs/utils/mount.go @@ -0,0 +1,14 @@ +package utils + +import ( + "k8s.io/mount-utils" +) + +func IsMountPointReadOnly(mp mount.MountPoint) bool { + for _, opt := range mp.Opts { + if opt == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/longhorn/go-iscsi-helper/types/tgtadm_errors.go b/vendor/github.com/longhorn/go-iscsi-helper/types/tgtadm_errors.go index 809278963db..1b7215c12aa 100644 --- a/vendor/github.com/longhorn/go-iscsi-helper/types/tgtadm_errors.go +++ b/vendor/github.com/longhorn/go-iscsi-helper/types/tgtadm_errors.go @@ -1,6 +1,6 @@ package types -// erros are from tgt/usr/tgtadm_error.h and tgt/usr/tgtadm.c +// errors are from tgt/usr/tgtadm_error.h and tgt/usr/tgtadm.c const ( TgtadmSuccess = "success" TgtadmUnknown = "unknown error" diff --git a/vendor/github.com/longhorn/go-spdk-helper/pkg/types/types.go b/vendor/github.com/longhorn/go-spdk-helper/pkg/types/types.go index c5c54953d34..1175d634271 100644 --- a/vendor/github.com/longhorn/go-spdk-helper/pkg/types/types.go +++ b/vendor/github.com/longhorn/go-spdk-helper/pkg/types/types.go @@ -37,3 +37,13 @@ const ( func GetNQN(name string) string { return fmt.Sprintf("%s:%s", NQNPrefix, name) } + +type DiskStatus struct { + Bdf string + Type string + Driver string + Vendor string + Numa string + Device string + BlockDevices string +} diff --git 
a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/instance.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/instance.go index 2164528fd2e..9be755b21a1 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/instance.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/instance.go @@ -343,6 +343,78 @@ func (c *InstanceServiceClient) VersionGet() (*meta.VersionOutput, error) { }, nil } +func (c *InstanceServiceClient) LogSetLevel(dataEngine, service, level string) error { + client := c.getControllerServiceClient() + ctx, cancel := context.WithTimeout(context.Background(), types.GRPCServiceTimeout) + defer cancel() + + driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)] + if !ok { + return fmt.Errorf("failed to set log level: invalid data engine %v", dataEngine) + } + + _, err := client.LogSetLevel(ctx, &rpc.LogSetLevelRequest{ + DataEngine: rpc.DataEngine(driver), + Level: level, + }) + return err +} + +func (c *InstanceServiceClient) LogSetFlags(dataEngine, service, flags string) error { + client := c.getControllerServiceClient() + ctx, cancel := context.WithTimeout(context.Background(), types.GRPCServiceTimeout) + defer cancel() + + driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)] + if !ok { + return fmt.Errorf("failed to set log flags: invalid data engine %v", dataEngine) + } + + _, err := client.LogSetFlags(ctx, &rpc.LogSetFlagsRequest{ + DataEngine: rpc.DataEngine(driver), + Flags: flags, + }) + return err +} + +func (c *InstanceServiceClient) LogGetLevel(dataEngine, service string) (string, error) { + client := c.getControllerServiceClient() + ctx, cancel := context.WithTimeout(context.Background(), types.GRPCServiceTimeout) + defer cancel() + + driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)] + if !ok { + return "", fmt.Errorf("failed to get log level: invalid data engine %v", dataEngine) + } + + resp, err := client.LogGetLevel(ctx, &rpc.LogGetLevelRequest{ + DataEngine: rpc.DataEngine(driver), + }) + if err != nil { + return "", err + } + return resp.Level, nil +} + +func (c *InstanceServiceClient) LogGetFlags(dataEngine, service string) (string, error) { + client := c.getControllerServiceClient() + ctx, cancel := context.WithTimeout(context.Background(), types.GRPCServiceTimeout) + defer cancel() + + driver, ok := rpc.DataEngine_value[getDataEngine(dataEngine)] + if !ok { + return "", fmt.Errorf("failed to get log flags: invalid data engine %v", dataEngine) + } + + resp, err := client.LogGetFlags(ctx, &rpc.LogGetFlagsRequest{ + DataEngine: rpc.DataEngine(driver), + }) + if err != nil { + return "", err + } + return resp.Flags, nil +} + func (c *InstanceServiceClient) CheckConnection() error { req := &healthpb.HealthCheckRequest{} _, err := c.health.Check(getContextWithGRPCTimeout(c.ctx), req) diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/proxy_volume.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/proxy_volume.go index ecb7026acec..d40fe11f451 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/proxy_volume.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/client/proxy_volume.go @@ -296,3 +296,19 @@ func (c *ProxyClient) VolumeSnapshotMaxSizeSet(dataEngine, engineName, volumeNam return nil } + +func (c *ProxyClient) RemountReadOnlyVolume(volumeName string) (err error) { + if volumeName == "" { + return fmt.Errorf("failed to remount volume, volume name is empty") + } + + req := 
&rpc.RemountVolumeRequest{ + VolumeName: volumeName, + } + + _, err = c.service.RemountReadOnlyVolume(getContextWithGRPCTimeout(c.ctx), req) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/common.proto b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/common.proto index ccdba6af5a3..cde1b57ffad 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/common.proto +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/common.proto @@ -13,4 +13,4 @@ enum BackendStoreDriver { enum DataEngine { DATA_ENGINE_V1 = 0; DATA_ENGINE_V2 = 1; -} \ No newline at end of file +} diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.pb.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.pb.go index 8ee70fe261b..3ca36f8accd 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.pb.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.pb.go @@ -814,6 +814,304 @@ func (x *InstanceReplaceRequest) GetTerminateSignal() string { return "" } +type LogSetLevelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataEngine DataEngine `protobuf:"varint,1,opt,name=data_engine,json=dataEngine,proto3,enum=imrpc.DataEngine" json:"data_engine,omitempty"` + Level string `protobuf:"bytes,2,opt,name=level,proto3" json:"level,omitempty"` +} + +func (x *LogSetLevelRequest) Reset() { + *x = LogSetLevelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogSetLevelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogSetLevelRequest) ProtoMessage() {} + +func (x *LogSetLevelRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogSetLevelRequest.ProtoReflect.Descriptor instead. 
+func (*LogSetLevelRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{11} +} + +func (x *LogSetLevelRequest) GetDataEngine() DataEngine { + if x != nil { + return x.DataEngine + } + return DataEngine_DATA_ENGINE_V1 +} + +func (x *LogSetLevelRequest) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +type LogSetFlagsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataEngine DataEngine `protobuf:"varint,1,opt,name=data_engine,json=dataEngine,proto3,enum=imrpc.DataEngine" json:"data_engine,omitempty"` + Flags string `protobuf:"bytes,2,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *LogSetFlagsRequest) Reset() { + *x = LogSetFlagsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogSetFlagsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogSetFlagsRequest) ProtoMessage() {} + +func (x *LogSetFlagsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogSetFlagsRequest.ProtoReflect.Descriptor instead. +func (*LogSetFlagsRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{12} +} + +func (x *LogSetFlagsRequest) GetDataEngine() DataEngine { + if x != nil { + return x.DataEngine + } + return DataEngine_DATA_ENGINE_V1 +} + +func (x *LogSetFlagsRequest) GetFlags() string { + if x != nil { + return x.Flags + } + return "" +} + +type LogGetLevelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataEngine DataEngine `protobuf:"varint,1,opt,name=data_engine,json=dataEngine,proto3,enum=imrpc.DataEngine" json:"data_engine,omitempty"` +} + +func (x *LogGetLevelRequest) Reset() { + *x = LogGetLevelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetLevelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetLevelRequest) ProtoMessage() {} + +func (x *LogGetLevelRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetLevelRequest.ProtoReflect.Descriptor instead. 
+func (*LogGetLevelRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{13} +} + +func (x *LogGetLevelRequest) GetDataEngine() DataEngine { + if x != nil { + return x.DataEngine + } + return DataEngine_DATA_ENGINE_V1 +} + +type LogGetLevelResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level,omitempty"` +} + +func (x *LogGetLevelResponse) Reset() { + *x = LogGetLevelResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetLevelResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetLevelResponse) ProtoMessage() {} + +func (x *LogGetLevelResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetLevelResponse.ProtoReflect.Descriptor instead. +func (*LogGetLevelResponse) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{14} +} + +func (x *LogGetLevelResponse) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +type LogGetFlagsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DataEngine DataEngine `protobuf:"varint,1,opt,name=data_engine,json=dataEngine,proto3,enum=imrpc.DataEngine" json:"data_engine,omitempty"` +} + +func (x *LogGetFlagsRequest) Reset() { + *x = LogGetFlagsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetFlagsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetFlagsRequest) ProtoMessage() {} + +func (x *LogGetFlagsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetFlagsRequest.ProtoReflect.Descriptor instead. 
+func (*LogGetFlagsRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{15} +} + +func (x *LogGetFlagsRequest) GetDataEngine() DataEngine { + if x != nil { + return x.DataEngine + } + return DataEngine_DATA_ENGINE_V1 +} + +type LogGetFlagsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Flags string `protobuf:"bytes,1,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *LogGetFlagsResponse) Reset() { + *x = LogGetFlagsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetFlagsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetFlagsResponse) ProtoMessage() {} + +func (x *LogGetFlagsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetFlagsResponse.ProtoReflect.Descriptor instead. +func (*LogGetFlagsResponse) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescGZIP(), []int{16} +} + +func (x *LogGetFlagsResponse) GetFlags() string { + if x != nil { + return x.Flags + } + return "" +} + var File_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto protoreflect.FileDescriptor var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDesc = []byte{ @@ -973,47 +1271,91 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_ 0x6e, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x32, 0xb9, 0x04, 0x0a, 0x0f, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0e, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1c, - 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, - 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x69, 0x6d, 0x72, 0x70, - 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x43, 0x0a, 0x0b, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x47, 0x65, - 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 
0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, - 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0c, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1b, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, - 0x0a, 0x0b, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x12, 0x19, 0x2e, - 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x6f, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x0d, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x4b, 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0a, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, - 0x68, 0x6f, 0x72, 0x6e, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2d, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x22, 0x5e, 0x0a, 0x12, 0x4c, 0x6f, 0x67, 0x53, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, + 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x5e, 0x0a, 0x12, 0x4c, 0x6f, 0x67, 0x53, + 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, + 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x65, 0x6e, 
0x67, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x48, 0x0a, 0x12, 0x4c, 0x6f, 0x67, 0x47, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, + 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x22, 0x2b, 0x0a, 0x13, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, + 0x48, 0x0a, 0x12, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x65, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x69, 0x6d, 0x72, + 0x70, 0x63, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x0a, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x22, 0x2b, 0x0a, 0x13, 0x4c, 0x6f, 0x67, + 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x32, 0xc9, 0x06, 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0e, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x69, + 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6d, 0x72, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x43, 0x0a, 0x0b, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x47, 0x65, 0x74, 0x12, + 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6d, 0x72, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0c, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x1a, 0x1b, 0x2e, + 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x0b, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x69, 0x6d, + 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4c, 0x6f, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0c, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4b, 0x0a, + 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x12, 0x1d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0b, 0x4c, 0x6f, + 0x67, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, + 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x40, 0x0a, 0x0b, + 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, + 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, + 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x2e, + 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, + 0x2e, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, + 0x61, 0x67, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x47, + 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, + 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x10, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 
0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, + 0x72, 0x6e, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1028,7 +1370,7 @@ func file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDescData } -var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes = make([]protoimpl.MessageInfo, 20) var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_goTypes = []interface{}{ (*ProcessInstanceSpec)(nil), // 0: imrpc.ProcessInstanceSpec (*SpdkInstanceSpec)(nil), // 1: imrpc.SpdkInstanceSpec @@ -1041,55 +1383,73 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_ (*InstanceListResponse)(nil), // 8: imrpc.InstanceListResponse (*InstanceLogRequest)(nil), // 9: imrpc.InstanceLogRequest (*InstanceReplaceRequest)(nil), // 10: imrpc.InstanceReplaceRequest - nil, // 11: imrpc.SpdkInstanceSpec.ReplicaAddressMapEntry - nil, // 12: imrpc.InstanceStatus.ConditionsEntry - nil, // 13: imrpc.InstanceListResponse.InstancesEntry - (BackendStoreDriver)(0), // 14: imrpc.BackendStoreDriver - (DataEngine)(0), // 15: imrpc.DataEngine - (*emptypb.Empty)(nil), // 16: google.protobuf.Empty - (*LogResponse)(nil), // 17: LogResponse - (*VersionResponse)(nil), // 18: VersionResponse + (*LogSetLevelRequest)(nil), // 11: imrpc.LogSetLevelRequest + (*LogSetFlagsRequest)(nil), // 12: imrpc.LogSetFlagsRequest + (*LogGetLevelRequest)(nil), // 13: imrpc.LogGetLevelRequest + (*LogGetLevelResponse)(nil), // 14: imrpc.LogGetLevelResponse + (*LogGetFlagsRequest)(nil), // 15: imrpc.LogGetFlagsRequest + (*LogGetFlagsResponse)(nil), // 16: imrpc.LogGetFlagsResponse + nil, // 17: imrpc.SpdkInstanceSpec.ReplicaAddressMapEntry + nil, // 18: imrpc.InstanceStatus.ConditionsEntry + nil, // 19: imrpc.InstanceListResponse.InstancesEntry + (BackendStoreDriver)(0), // 20: imrpc.BackendStoreDriver + (DataEngine)(0), // 21: imrpc.DataEngine + (*emptypb.Empty)(nil), // 22: google.protobuf.Empty + (*LogResponse)(nil), // 23: LogResponse + (*VersionResponse)(nil), // 24: VersionResponse } var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_depIdxs = []int32{ - 11, // 0: imrpc.SpdkInstanceSpec.replica_address_map:type_name -> imrpc.SpdkInstanceSpec.ReplicaAddressMapEntry - 14, // 1: imrpc.InstanceSpec.backend_store_driver:type_name -> imrpc.BackendStoreDriver + 17, // 0: imrpc.SpdkInstanceSpec.replica_address_map:type_name -> imrpc.SpdkInstanceSpec.ReplicaAddressMapEntry + 20, // 1: imrpc.InstanceSpec.backend_store_driver:type_name -> imrpc.BackendStoreDriver 0, // 2: imrpc.InstanceSpec.process_instance_spec:type_name -> imrpc.ProcessInstanceSpec 1, // 3: imrpc.InstanceSpec.spdk_instance_spec:type_name -> imrpc.SpdkInstanceSpec - 15, // 4: imrpc.InstanceSpec.data_engine:type_name -> imrpc.DataEngine - 12, // 5: imrpc.InstanceStatus.conditions:type_name -> imrpc.InstanceStatus.ConditionsEntry + 21, // 4: imrpc.InstanceSpec.data_engine:type_name -> imrpc.DataEngine + 18, // 5: imrpc.InstanceStatus.conditions:type_name -> imrpc.InstanceStatus.ConditionsEntry 2, // 6: 
imrpc.InstanceCreateRequest.spec:type_name -> imrpc.InstanceSpec - 14, // 7: imrpc.InstanceDeleteRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver - 15, // 8: imrpc.InstanceDeleteRequest.data_engine:type_name -> imrpc.DataEngine - 14, // 9: imrpc.InstanceGetRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver - 15, // 10: imrpc.InstanceGetRequest.data_engine:type_name -> imrpc.DataEngine + 20, // 7: imrpc.InstanceDeleteRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver + 21, // 8: imrpc.InstanceDeleteRequest.data_engine:type_name -> imrpc.DataEngine + 20, // 9: imrpc.InstanceGetRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver + 21, // 10: imrpc.InstanceGetRequest.data_engine:type_name -> imrpc.DataEngine 2, // 11: imrpc.InstanceResponse.spec:type_name -> imrpc.InstanceSpec 3, // 12: imrpc.InstanceResponse.status:type_name -> imrpc.InstanceStatus - 13, // 13: imrpc.InstanceListResponse.instances:type_name -> imrpc.InstanceListResponse.InstancesEntry - 14, // 14: imrpc.InstanceLogRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver - 15, // 15: imrpc.InstanceLogRequest.data_engine:type_name -> imrpc.DataEngine + 19, // 13: imrpc.InstanceListResponse.instances:type_name -> imrpc.InstanceListResponse.InstancesEntry + 20, // 14: imrpc.InstanceLogRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver + 21, // 15: imrpc.InstanceLogRequest.data_engine:type_name -> imrpc.DataEngine 2, // 16: imrpc.InstanceReplaceRequest.spec:type_name -> imrpc.InstanceSpec - 7, // 17: imrpc.InstanceListResponse.InstancesEntry.value:type_name -> imrpc.InstanceResponse - 4, // 18: imrpc.InstanceService.InstanceCreate:input_type -> imrpc.InstanceCreateRequest - 5, // 19: imrpc.InstanceService.InstanceDelete:input_type -> imrpc.InstanceDeleteRequest - 6, // 20: imrpc.InstanceService.InstanceGet:input_type -> imrpc.InstanceGetRequest - 16, // 21: imrpc.InstanceService.InstanceList:input_type -> google.protobuf.Empty - 9, // 22: imrpc.InstanceService.InstanceLog:input_type -> imrpc.InstanceLogRequest - 16, // 23: imrpc.InstanceService.InstanceWatch:input_type -> google.protobuf.Empty - 10, // 24: imrpc.InstanceService.InstanceReplace:input_type -> imrpc.InstanceReplaceRequest - 16, // 25: imrpc.InstanceService.VersionGet:input_type -> google.protobuf.Empty - 7, // 26: imrpc.InstanceService.InstanceCreate:output_type -> imrpc.InstanceResponse - 7, // 27: imrpc.InstanceService.InstanceDelete:output_type -> imrpc.InstanceResponse - 7, // 28: imrpc.InstanceService.InstanceGet:output_type -> imrpc.InstanceResponse - 8, // 29: imrpc.InstanceService.InstanceList:output_type -> imrpc.InstanceListResponse - 17, // 30: imrpc.InstanceService.InstanceLog:output_type -> LogResponse - 16, // 31: imrpc.InstanceService.InstanceWatch:output_type -> google.protobuf.Empty - 7, // 32: imrpc.InstanceService.InstanceReplace:output_type -> imrpc.InstanceResponse - 18, // 33: imrpc.InstanceService.VersionGet:output_type -> VersionResponse - 26, // [26:34] is the sub-list for method output_type - 18, // [18:26] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name + 21, // 17: imrpc.LogSetLevelRequest.data_engine:type_name -> imrpc.DataEngine + 21, // 18: imrpc.LogSetFlagsRequest.data_engine:type_name -> imrpc.DataEngine + 21, // 19: imrpc.LogGetLevelRequest.data_engine:type_name -> imrpc.DataEngine + 21, // 
20: imrpc.LogGetFlagsRequest.data_engine:type_name -> imrpc.DataEngine + 7, // 21: imrpc.InstanceListResponse.InstancesEntry.value:type_name -> imrpc.InstanceResponse + 4, // 22: imrpc.InstanceService.InstanceCreate:input_type -> imrpc.InstanceCreateRequest + 5, // 23: imrpc.InstanceService.InstanceDelete:input_type -> imrpc.InstanceDeleteRequest + 6, // 24: imrpc.InstanceService.InstanceGet:input_type -> imrpc.InstanceGetRequest + 22, // 25: imrpc.InstanceService.InstanceList:input_type -> google.protobuf.Empty + 9, // 26: imrpc.InstanceService.InstanceLog:input_type -> imrpc.InstanceLogRequest + 22, // 27: imrpc.InstanceService.InstanceWatch:input_type -> google.protobuf.Empty + 10, // 28: imrpc.InstanceService.InstanceReplace:input_type -> imrpc.InstanceReplaceRequest + 11, // 29: imrpc.InstanceService.LogSetLevel:input_type -> imrpc.LogSetLevelRequest + 12, // 30: imrpc.InstanceService.LogSetFlags:input_type -> imrpc.LogSetFlagsRequest + 13, // 31: imrpc.InstanceService.LogGetLevel:input_type -> imrpc.LogGetLevelRequest + 15, // 32: imrpc.InstanceService.LogGetFlags:input_type -> imrpc.LogGetFlagsRequest + 22, // 33: imrpc.InstanceService.VersionGet:input_type -> google.protobuf.Empty + 7, // 34: imrpc.InstanceService.InstanceCreate:output_type -> imrpc.InstanceResponse + 7, // 35: imrpc.InstanceService.InstanceDelete:output_type -> imrpc.InstanceResponse + 7, // 36: imrpc.InstanceService.InstanceGet:output_type -> imrpc.InstanceResponse + 8, // 37: imrpc.InstanceService.InstanceList:output_type -> imrpc.InstanceListResponse + 23, // 38: imrpc.InstanceService.InstanceLog:output_type -> LogResponse + 22, // 39: imrpc.InstanceService.InstanceWatch:output_type -> google.protobuf.Empty + 7, // 40: imrpc.InstanceService.InstanceReplace:output_type -> imrpc.InstanceResponse + 22, // 41: imrpc.InstanceService.LogSetLevel:output_type -> google.protobuf.Empty + 22, // 42: imrpc.InstanceService.LogSetFlags:output_type -> google.protobuf.Empty + 14, // 43: imrpc.InstanceService.LogGetLevel:output_type -> imrpc.LogGetLevelResponse + 16, // 44: imrpc.InstanceService.LogGetFlags:output_type -> imrpc.LogGetFlagsResponse + 24, // 45: imrpc.InstanceService.VersionGet:output_type -> VersionResponse + 34, // [34:46] is the sub-list for method output_type + 22, // [22:34] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name } func init() { file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_init() } @@ -1232,6 +1592,78 @@ func file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto return nil } } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogSetLevelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogSetFlagsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetLevelRequest); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetLevelResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetFlagsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetFlagsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1239,7 +1671,7 @@ func file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_instance_proto_rawDesc, NumEnums: 0, - NumMessages: 14, + NumMessages: 20, NumExtensions: 0, NumServices: 1, }, @@ -1272,6 +1704,10 @@ type InstanceServiceClient interface { InstanceLog(ctx context.Context, in *InstanceLogRequest, opts ...grpc.CallOption) (InstanceService_InstanceLogClient, error) InstanceWatch(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (InstanceService_InstanceWatchClient, error) InstanceReplace(ctx context.Context, in *InstanceReplaceRequest, opts ...grpc.CallOption) (*InstanceResponse, error) + LogSetLevel(ctx context.Context, in *LogSetLevelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + LogSetFlags(ctx context.Context, in *LogSetFlagsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + LogGetLevel(ctx context.Context, in *LogGetLevelRequest, opts ...grpc.CallOption) (*LogGetLevelResponse, error) + LogGetFlags(ctx context.Context, in *LogGetFlagsRequest, opts ...grpc.CallOption) (*LogGetFlagsResponse, error) VersionGet(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) } @@ -1392,6 +1828,42 @@ func (c *instanceServiceClient) InstanceReplace(ctx context.Context, in *Instanc return out, nil } +func (c *instanceServiceClient) LogSetLevel(ctx context.Context, in *LogSetLevelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/imrpc.InstanceService/LogSetLevel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) LogSetFlags(ctx context.Context, in *LogSetFlagsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/imrpc.InstanceService/LogSetFlags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) LogGetLevel(ctx context.Context, in *LogGetLevelRequest, opts ...grpc.CallOption) (*LogGetLevelResponse, error) { + out := new(LogGetLevelResponse) + err := c.cc.Invoke(ctx, "/imrpc.InstanceService/LogGetLevel", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *instanceServiceClient) LogGetFlags(ctx context.Context, in *LogGetFlagsRequest, opts ...grpc.CallOption) (*LogGetFlagsResponse, error) { + out := new(LogGetFlagsResponse) + err := c.cc.Invoke(ctx, "/imrpc.InstanceService/LogGetFlags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *instanceServiceClient) VersionGet(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { out := new(VersionResponse) err := c.cc.Invoke(ctx, "/imrpc.InstanceService/VersionGet", in, out, opts...) @@ -1410,6 +1882,10 @@ type InstanceServiceServer interface { InstanceLog(*InstanceLogRequest, InstanceService_InstanceLogServer) error InstanceWatch(*emptypb.Empty, InstanceService_InstanceWatchServer) error InstanceReplace(context.Context, *InstanceReplaceRequest) (*InstanceResponse, error) + LogSetLevel(context.Context, *LogSetLevelRequest) (*emptypb.Empty, error) + LogSetFlags(context.Context, *LogSetFlagsRequest) (*emptypb.Empty, error) + LogGetLevel(context.Context, *LogGetLevelRequest) (*LogGetLevelResponse, error) + LogGetFlags(context.Context, *LogGetFlagsRequest) (*LogGetFlagsResponse, error) VersionGet(context.Context, *emptypb.Empty) (*VersionResponse, error) } @@ -1438,6 +1914,18 @@ func (*UnimplementedInstanceServiceServer) InstanceWatch(*emptypb.Empty, Instanc func (*UnimplementedInstanceServiceServer) InstanceReplace(context.Context, *InstanceReplaceRequest) (*InstanceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method InstanceReplace not implemented") } +func (*UnimplementedInstanceServiceServer) LogSetLevel(context.Context, *LogSetLevelRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogSetLevel not implemented") +} +func (*UnimplementedInstanceServiceServer) LogSetFlags(context.Context, *LogSetFlagsRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogSetFlags not implemented") +} +func (*UnimplementedInstanceServiceServer) LogGetLevel(context.Context, *LogGetLevelRequest) (*LogGetLevelResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogGetLevel not implemented") +} +func (*UnimplementedInstanceServiceServer) LogGetFlags(context.Context, *LogGetFlagsRequest) (*LogGetFlagsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogGetFlags not implemented") +} func (*UnimplementedInstanceServiceServer) VersionGet(context.Context, *emptypb.Empty) (*VersionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VersionGet not implemented") } @@ -1578,6 +2066,78 @@ func _InstanceService_InstanceReplace_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _InstanceService_LogSetLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LogSetLevelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).LogSetLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/imrpc.InstanceService/LogSetLevel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).LogSetLevel(ctx, req.(*LogSetLevelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_InstanceService_LogSetFlags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LogSetFlagsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).LogSetFlags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/imrpc.InstanceService/LogSetFlags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).LogSetFlags(ctx, req.(*LogSetFlagsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_LogGetLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LogGetLevelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).LogGetLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/imrpc.InstanceService/LogGetLevel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).LogGetLevel(ctx, req.(*LogGetLevelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _InstanceService_LogGetFlags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LogGetFlagsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(InstanceServiceServer).LogGetFlags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/imrpc.InstanceService/LogGetFlags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(InstanceServiceServer).LogGetFlags(ctx, req.(*LogGetFlagsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _InstanceService_VersionGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(emptypb.Empty) if err := dec(in); err != nil { @@ -1620,6 +2180,22 @@ var _InstanceService_serviceDesc = grpc.ServiceDesc{ MethodName: "InstanceReplace", Handler: _InstanceService_InstanceReplace_Handler, }, + { + MethodName: "LogSetLevel", + Handler: _InstanceService_LogSetLevel_Handler, + }, + { + MethodName: "LogSetFlags", + Handler: _InstanceService_LogSetFlags_Handler, + }, + { + MethodName: "LogGetLevel", + Handler: _InstanceService_LogGetLevel_Handler, + }, + { + MethodName: "LogGetFlags", + Handler: _InstanceService_LogGetFlags_Handler, + }, { MethodName: "VersionGet", Handler: _InstanceService_VersionGet_Handler, diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.proto b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.proto index e169a7c42c2..86d7c446655 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.proto +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/instance.proto @@ -17,6 +17,11 @@ service InstanceService { rpc InstanceWatch(google.protobuf.Empty) returns (stream google.protobuf.Empty) {} rpc InstanceReplace(InstanceReplaceRequest) returns (InstanceResponse) {} + rpc LogSetLevel(LogSetLevelRequest) returns (google.protobuf.Empty); + rpc LogSetFlags(LogSetFlagsRequest) returns (google.protobuf.Empty); + rpc 
LogGetLevel(LogGetLevelRequest) returns (LogGetLevelResponse); + rpc LogGetFlags(LogGetFlagsRequest) returns (LogGetFlagsResponse); + rpc VersionGet(google.protobuf.Empty) returns (VersionResponse); } @@ -102,3 +107,29 @@ message InstanceReplaceRequest { InstanceSpec spec = 1; string terminate_signal = 2; } + +message LogSetLevelRequest { + DataEngine data_engine = 1; + string level = 2; +} + +message LogSetFlagsRequest { + DataEngine data_engine = 1; + string flags = 2; +} + +message LogGetLevelRequest { + DataEngine data_engine = 1; +} + +message LogGetLevelResponse { + string level = 1; +} + +message LogGetFlagsRequest { + DataEngine data_engine = 1; +} + +message LogGetFlagsResponse { + string flags = 1; +} diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.pb.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.pb.go index b25097e7ab8..73e376df712 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.pb.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.pb.go @@ -2361,6 +2361,53 @@ func (x *EngineMetricsGetProxyResponse) GetMetrics() *ptypes.Metrics { return nil } +type RemountVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VolumeName string `protobuf:"bytes,1,opt,name=volume_name,json=volumeName,proto3" json:"volume_name,omitempty"` +} + +func (x *RemountVolumeRequest) Reset() { + *x = RemountVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RemountVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemountVolumeRequest) ProtoMessage() {} + +func (x *RemountVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemountVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*RemountVolumeRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_rawDescGZIP(), []int{37} +} + +func (x *RemountVolumeRequest) GetVolumeName() string { + if x != nil { + return x.VolumeName + } + return "" +} + var File_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto protoreflect.FileDescriptor var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_rawDesc = []byte{ @@ -2851,183 +2898,191 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_raw 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x07, 0x6d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x32, 0xb7, 0x15, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x50, 0x0a, 0x10, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x12, - 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, - 0x09, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0c, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x12, 0x20, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, - 0x78, 0x70, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x27, 0x2e, 0x69, 0x6d, - 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x16, - 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, 0x68, - 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x74, 0x0a, 0x22, 0x56, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 
0x55, 0x6e, 0x6d, 0x61, 0x70, 0x4d, 0x61, 0x72, 0x6b, 0x53, 0x6e, 0x61, 0x70, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x53, 0x65, 0x74, 0x12, - 0x36, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x4d, 0x61, 0x72, 0x6b, 0x53, 0x6e, 0x61, - 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x53, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x62, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x4d, 0x61, 0x78, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x65, 0x74, 0x12, 0x2d, 0x2e, 0x69, - 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, - 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x12, 0x60, 0x0a, 0x18, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x12, - 0x2c, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, - 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x53, - 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x72, 0x69, 0x63, 0x73, 0x22, 0x37, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x32, 0x85, 0x16, + 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x50, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, + 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, + 0x61, 0x6e, 0x64, 0x12, 0x20, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, 0x0a, + 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x27, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x69, 0x6d, - 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, - 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x50, 0x75, 0x72, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, - 0x72, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x16, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x46, + 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, + 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x13, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, - 0x72, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, - 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 
0x75, 0x72, 0x67, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, - 0x6c, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x5f, 0x0a, 0x13, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, - 0x0a, 0x0c, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x20, - 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x74, 0x79, 0x12, 0x74, 0x0a, 0x22, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, 0x61, + 0x70, 0x4d, 0x61, 0x72, 0x6b, 0x53, 0x6e, 0x61, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x53, 0x65, 0x74, 0x12, 0x36, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x6e, 0x6d, + 0x61, 0x70, 0x4d, 0x61, 0x72, 0x6b, 0x53, 0x6e, 0x61, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x6a, 0x0a, 0x12, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x26, - 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, - 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, - 
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x69, - 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x12, 0x27, 0x2e, 0x69, 0x6d, - 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x53, 0x65, 0x74, 0x12, 0x2d, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x18, - 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x6f, 0x75, - 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x12, 0x1e, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x52, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x60, 0x0a, 0x18, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, + 0x78, 0x53, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x12, 0x2c, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, - 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x19, 0x2e, - 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, - 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, - 0x73, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x64, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x28, 0x2e, - 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5e, + 0x0a, 0x0e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, + 0x0a, 0x0c, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x19, + 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x26, 0x2e, 0x69, 0x6d, 0x72, 0x70, + 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, + 0x65, 0x72, 0x74, 0x12, 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x4a, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x4a, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, 0x72, 0x67, 0x65, + 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, 0x72, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x11, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x12, 0x25, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, - 0x4d, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, - 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, - 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x39, - 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, - 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2d, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, - 0x70, 0x6b, 0x67, 0x2f, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x13, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, 0x72, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, + 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x50, 0x75, 0x72, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x12, 0x4a, 0x0a, 0x0d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x12, 0x21, 0x2e, + 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x13, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x69, 0x6d, 0x72, + 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x43, 0x6c, 0x6f, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0e, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x22, 0x2e, 0x69, 0x6d, + 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0c, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x20, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x6a, 0x0a, 0x12, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x26, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2c, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, + 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, + 0x22, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a, + 0x14, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, + 
0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x5b, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x13, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, + 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x12, 0x27, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x18, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x44, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x12, + 0x1e, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x52, 
0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, + 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, + 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x28, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x56, 0x65, 0x72, 0x69, + 0x66, 0x79, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x21, 0x2e, 0x69, 0x6d, 0x72, 0x70, + 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, + 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x69, 0x6d, 0x72, 0x70, + 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, + 0x6f, 0x64, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x50, + 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x24, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x1b, 0x2e, 0x69, 0x6d, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, + 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2d, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x69, 0x6d, 0x72, 0x70, 0x63, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3042,7 +3097,7 @@ func 
file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_ra return file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_rawDescData } -var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_msgTypes = make([]protoimpl.MessageInfo, 46) +var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_msgTypes = make([]protoimpl.MessageInfo, 47) var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_goTypes = []interface{}{ (*ProxyEngineRequest)(nil), // 0: imrpc.ProxyEngineRequest (*EngineVersionProxyResponse)(nil), // 1: imrpc.EngineVersionProxyResponse @@ -3081,85 +3136,86 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_goT (*EngineSnapshotHashStatusRequest)(nil), // 34: imrpc.EngineSnapshotHashStatusRequest (*EngineSnapshotHashStatusProxyResponse)(nil), // 35: imrpc.EngineSnapshotHashStatusProxyResponse (*EngineMetricsGetProxyResponse)(nil), // 36: imrpc.EngineMetricsGetProxyResponse - nil, // 37: imrpc.EngineSnapshotListProxyResponse.DisksEntry - nil, // 38: imrpc.EngineSnapshotDiskInfo.ChildrenEntry - nil, // 39: imrpc.EngineSnapshotDiskInfo.LabelsEntry - nil, // 40: imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry - nil, // 41: imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry - nil, // 42: imrpc.EngineSnapshotBackupRequest.LabelsEntry - nil, // 43: imrpc.EngineBackupRestoreStatusProxyResponse.StatusEntry - nil, // 44: imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry - nil, // 45: imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry - (BackendStoreDriver)(0), // 46: imrpc.BackendStoreDriver - (DataEngine)(0), // 47: imrpc.DataEngine - (*ptypes.VersionOutput)(nil), // 48: ptypes.VersionOutput - (*ptypes.Volume)(nil), // 49: ptypes.Volume - (*ptypes.VolumeExpandRequest)(nil), // 50: ptypes.VolumeExpandRequest - (*ptypes.VolumeFrontendStartRequest)(nil), // 51: ptypes.VolumeFrontendStartRequest - (*ptypes.VolumeSnapshotRequest)(nil), // 52: ptypes.VolumeSnapshotRequest - (*ptypes.VolumeSnapshotReply)(nil), // 53: ptypes.VolumeSnapshotReply - (*ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest)(nil), // 54: ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest - (*ptypes.VolumeSnapshotMaxCountSetRequest)(nil), // 55: ptypes.VolumeSnapshotMaxCountSetRequest - (*ptypes.VolumeSnapshotMaxSizeSetRequest)(nil), // 56: ptypes.VolumeSnapshotMaxSizeSetRequest - (*ptypes.ReplicaListReply)(nil), // 57: ptypes.ReplicaListReply - (ptypes.ReplicaMode)(0), // 58: ptypes.ReplicaMode - (*ptypes.Metrics)(nil), // 59: ptypes.Metrics - (*ptypes.SnapshotPurgeStatusResponse)(nil), // 60: ptypes.SnapshotPurgeStatusResponse - (*ptypes.SnapshotCloneStatusResponse)(nil), // 61: ptypes.SnapshotCloneStatusResponse - (*ptypes.ReplicaRebuildStatusResponse)(nil), // 62: ptypes.ReplicaRebuildStatusResponse - (*ptypes.SnapshotHashStatusResponse)(nil), // 63: ptypes.SnapshotHashStatusResponse - (*emptypb.Empty)(nil), // 64: google.protobuf.Empty + (*RemountVolumeRequest)(nil), // 37: imrpc.RemountVolumeRequest + nil, // 38: imrpc.EngineSnapshotListProxyResponse.DisksEntry + nil, // 39: imrpc.EngineSnapshotDiskInfo.ChildrenEntry + nil, // 40: imrpc.EngineSnapshotDiskInfo.LabelsEntry + nil, // 41: imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry + nil, // 42: imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry + nil, // 43: imrpc.EngineSnapshotBackupRequest.LabelsEntry + nil, // 44: imrpc.EngineBackupRestoreStatusProxyResponse.StatusEntry + nil, // 45: 
imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry + nil, // 46: imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry + (BackendStoreDriver)(0), // 47: imrpc.BackendStoreDriver + (DataEngine)(0), // 48: imrpc.DataEngine + (*ptypes.VersionOutput)(nil), // 49: ptypes.VersionOutput + (*ptypes.Volume)(nil), // 50: ptypes.Volume + (*ptypes.VolumeExpandRequest)(nil), // 51: ptypes.VolumeExpandRequest + (*ptypes.VolumeFrontendStartRequest)(nil), // 52: ptypes.VolumeFrontendStartRequest + (*ptypes.VolumeSnapshotRequest)(nil), // 53: ptypes.VolumeSnapshotRequest + (*ptypes.VolumeSnapshotReply)(nil), // 54: ptypes.VolumeSnapshotReply + (*ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest)(nil), // 55: ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest + (*ptypes.VolumeSnapshotMaxCountSetRequest)(nil), // 56: ptypes.VolumeSnapshotMaxCountSetRequest + (*ptypes.VolumeSnapshotMaxSizeSetRequest)(nil), // 57: ptypes.VolumeSnapshotMaxSizeSetRequest + (*ptypes.ReplicaListReply)(nil), // 58: ptypes.ReplicaListReply + (ptypes.ReplicaMode)(0), // 59: ptypes.ReplicaMode + (*ptypes.Metrics)(nil), // 60: ptypes.Metrics + (*ptypes.SnapshotPurgeStatusResponse)(nil), // 61: ptypes.SnapshotPurgeStatusResponse + (*ptypes.SnapshotCloneStatusResponse)(nil), // 62: ptypes.SnapshotCloneStatusResponse + (*ptypes.ReplicaRebuildStatusResponse)(nil), // 63: ptypes.ReplicaRebuildStatusResponse + (*ptypes.SnapshotHashStatusResponse)(nil), // 64: ptypes.SnapshotHashStatusResponse + (*emptypb.Empty)(nil), // 65: google.protobuf.Empty } var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_depIdxs = []int32{ - 46, // 0: imrpc.ProxyEngineRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver - 47, // 1: imrpc.ProxyEngineRequest.data_engine:type_name -> imrpc.DataEngine - 48, // 2: imrpc.EngineVersionProxyResponse.version:type_name -> ptypes.VersionOutput - 49, // 3: imrpc.EngineVolumeGetProxyResponse.volume:type_name -> ptypes.Volume + 47, // 0: imrpc.ProxyEngineRequest.backend_store_driver:type_name -> imrpc.BackendStoreDriver + 48, // 1: imrpc.ProxyEngineRequest.data_engine:type_name -> imrpc.DataEngine + 49, // 2: imrpc.EngineVersionProxyResponse.version:type_name -> ptypes.VersionOutput + 50, // 3: imrpc.EngineVolumeGetProxyResponse.volume:type_name -> ptypes.Volume 0, // 4: imrpc.EngineVolumeExpandRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 50, // 5: imrpc.EngineVolumeExpandRequest.expand:type_name -> ptypes.VolumeExpandRequest + 51, // 5: imrpc.EngineVolumeExpandRequest.expand:type_name -> ptypes.VolumeExpandRequest 0, // 6: imrpc.EngineVolumeFrontendStartRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 51, // 7: imrpc.EngineVolumeFrontendStartRequest.frontend_start:type_name -> ptypes.VolumeFrontendStartRequest + 52, // 7: imrpc.EngineVolumeFrontendStartRequest.frontend_start:type_name -> ptypes.VolumeFrontendStartRequest 0, // 8: imrpc.EngineVolumeSnapshotRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 52, // 9: imrpc.EngineVolumeSnapshotRequest.snapshot_volume:type_name -> ptypes.VolumeSnapshotRequest - 53, // 10: imrpc.EngineVolumeSnapshotProxyResponse.snapshot:type_name -> ptypes.VolumeSnapshotReply + 53, // 9: imrpc.EngineVolumeSnapshotRequest.snapshot_volume:type_name -> ptypes.VolumeSnapshotRequest + 54, // 10: imrpc.EngineVolumeSnapshotProxyResponse.snapshot:type_name -> ptypes.VolumeSnapshotReply 0, // 11: imrpc.EngineVolumeUnmapMarkSnapChainRemovedSetRequest.proxy_engine_request:type_name -> 
imrpc.ProxyEngineRequest - 54, // 12: imrpc.EngineVolumeUnmapMarkSnapChainRemovedSetRequest.unmap_mark_snap:type_name -> ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest + 55, // 12: imrpc.EngineVolumeUnmapMarkSnapChainRemovedSetRequest.unmap_mark_snap:type_name -> ptypes.VolumeUnmapMarkSnapChainRemovedSetRequest 0, // 13: imrpc.EngineVolumeSnapshotMaxCountSetRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 55, // 14: imrpc.EngineVolumeSnapshotMaxCountSetRequest.count:type_name -> ptypes.VolumeSnapshotMaxCountSetRequest + 56, // 14: imrpc.EngineVolumeSnapshotMaxCountSetRequest.count:type_name -> ptypes.VolumeSnapshotMaxCountSetRequest 0, // 15: imrpc.EngineVolumeSnapshotMaxSizeSetRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 56, // 16: imrpc.EngineVolumeSnapshotMaxSizeSetRequest.size:type_name -> ptypes.VolumeSnapshotMaxSizeSetRequest - 37, // 17: imrpc.EngineSnapshotListProxyResponse.disks:type_name -> imrpc.EngineSnapshotListProxyResponse.DisksEntry - 38, // 18: imrpc.EngineSnapshotDiskInfo.children:type_name -> imrpc.EngineSnapshotDiskInfo.ChildrenEntry - 39, // 19: imrpc.EngineSnapshotDiskInfo.labels:type_name -> imrpc.EngineSnapshotDiskInfo.LabelsEntry + 57, // 16: imrpc.EngineVolumeSnapshotMaxSizeSetRequest.size:type_name -> ptypes.VolumeSnapshotMaxSizeSetRequest + 38, // 17: imrpc.EngineSnapshotListProxyResponse.disks:type_name -> imrpc.EngineSnapshotListProxyResponse.DisksEntry + 39, // 18: imrpc.EngineSnapshotDiskInfo.children:type_name -> imrpc.EngineSnapshotDiskInfo.ChildrenEntry + 40, // 19: imrpc.EngineSnapshotDiskInfo.labels:type_name -> imrpc.EngineSnapshotDiskInfo.LabelsEntry 0, // 20: imrpc.EngineSnapshotRevertRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 21: imrpc.EngineSnapshotPurgeRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 40, // 22: imrpc.EngineSnapshotPurgeStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry + 41, // 22: imrpc.EngineSnapshotPurgeStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry 0, // 23: imrpc.EngineSnapshotCloneRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 41, // 24: imrpc.EngineSnapshotCloneStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry + 42, // 24: imrpc.EngineSnapshotCloneStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry 0, // 25: imrpc.EngineSnapshotRemoveRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 26: imrpc.EngineSnapshotBackupRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 42, // 27: imrpc.EngineSnapshotBackupRequest.labels:type_name -> imrpc.EngineSnapshotBackupRequest.LabelsEntry + 43, // 27: imrpc.EngineSnapshotBackupRequest.labels:type_name -> imrpc.EngineSnapshotBackupRequest.LabelsEntry 0, // 28: imrpc.EngineSnapshotBackupStatusRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 29: imrpc.EngineBackupRestoreRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 43, // 30: imrpc.EngineBackupRestoreStatusProxyResponse.status:type_name -> imrpc.EngineBackupRestoreStatusProxyResponse.StatusEntry + 44, // 30: imrpc.EngineBackupRestoreStatusProxyResponse.status:type_name -> imrpc.EngineBackupRestoreStatusProxyResponse.StatusEntry 0, // 31: imrpc.EngineBackupRestoreFinishRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 32: 
imrpc.EngineReplicaAddRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 57, // 33: imrpc.EngineReplicaListProxyResponse.replica_list:type_name -> ptypes.ReplicaListReply + 58, // 33: imrpc.EngineReplicaListProxyResponse.replica_list:type_name -> ptypes.ReplicaListReply 0, // 34: imrpc.EngineReplicaVerifyRebuildRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 44, // 35: imrpc.EngineReplicaRebuildStatusProxyResponse.status:type_name -> imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry + 45, // 35: imrpc.EngineReplicaRebuildStatusProxyResponse.status:type_name -> imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry 0, // 36: imrpc.EngineReplicaRemoveRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 37: imrpc.EngineReplicaModeUpdateRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 58, // 38: imrpc.EngineReplicaModeUpdateRequest.mode:type_name -> ptypes.ReplicaMode + 59, // 38: imrpc.EngineReplicaModeUpdateRequest.mode:type_name -> ptypes.ReplicaMode 0, // 39: imrpc.EngineSnapshotHashRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest 0, // 40: imrpc.EngineSnapshotHashStatusRequest.proxy_engine_request:type_name -> imrpc.ProxyEngineRequest - 45, // 41: imrpc.EngineSnapshotHashStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry - 59, // 42: imrpc.EngineMetricsGetProxyResponse.metrics:type_name -> ptypes.Metrics + 46, // 41: imrpc.EngineSnapshotHashStatusProxyResponse.status:type_name -> imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry + 60, // 42: imrpc.EngineMetricsGetProxyResponse.metrics:type_name -> ptypes.Metrics 11, // 43: imrpc.EngineSnapshotListProxyResponse.DisksEntry.value:type_name -> imrpc.EngineSnapshotDiskInfo - 60, // 44: imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotPurgeStatusResponse - 61, // 45: imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotCloneStatusResponse + 61, // 44: imrpc.EngineSnapshotPurgeStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotPurgeStatusResponse + 62, // 45: imrpc.EngineSnapshotCloneStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotCloneStatusResponse 25, // 46: imrpc.EngineBackupRestoreStatusProxyResponse.StatusEntry.value:type_name -> imrpc.EngineBackupRestoreStatus - 62, // 47: imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry.value:type_name -> ptypes.ReplicaRebuildStatusResponse - 63, // 48: imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotHashStatusResponse + 63, // 47: imrpc.EngineReplicaRebuildStatusProxyResponse.StatusEntry.value:type_name -> ptypes.ReplicaRebuildStatusResponse + 64, // 48: imrpc.EngineSnapshotHashStatusProxyResponse.StatusEntry.value:type_name -> ptypes.SnapshotHashStatusResponse 0, // 49: imrpc.ProxyEngineService.ServerVersionGet:input_type -> imrpc.ProxyEngineRequest 0, // 50: imrpc.ProxyEngineService.VolumeGet:input_type -> imrpc.ProxyEngineRequest 3, // 51: imrpc.ProxyEngineService.VolumeExpand:input_type -> imrpc.EngineVolumeExpandRequest @@ -3183,7 +3239,7 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_dep 22, // 69: imrpc.ProxyEngineService.BackupRestore:input_type -> imrpc.EngineBackupRestoreRequest 0, // 70: imrpc.ProxyEngineService.BackupRestoreStatus:input_type -> imrpc.ProxyEngineRequest 26, // 71: imrpc.ProxyEngineService.BackupRestoreFinish:input_type -> 
imrpc.EngineBackupRestoreFinishRequest - 64, // 72: imrpc.ProxyEngineService.CleanupBackupMountPoints:input_type -> google.protobuf.Empty + 65, // 72: imrpc.ProxyEngineService.CleanupBackupMountPoints:input_type -> google.protobuf.Empty 27, // 73: imrpc.ProxyEngineService.ReplicaAdd:input_type -> imrpc.EngineReplicaAddRequest 0, // 74: imrpc.ProxyEngineService.ReplicaList:input_type -> imrpc.ProxyEngineRequest 0, // 75: imrpc.ProxyEngineService.ReplicaRebuildingStatus:input_type -> imrpc.ProxyEngineRequest @@ -3191,39 +3247,41 @@ var file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_dep 31, // 77: imrpc.ProxyEngineService.ReplicaRemove:input_type -> imrpc.EngineReplicaRemoveRequest 32, // 78: imrpc.ProxyEngineService.ReplicaModeUpdate:input_type -> imrpc.EngineReplicaModeUpdateRequest 0, // 79: imrpc.ProxyEngineService.MetricsGet:input_type -> imrpc.ProxyEngineRequest - 1, // 80: imrpc.ProxyEngineService.ServerVersionGet:output_type -> imrpc.EngineVersionProxyResponse - 2, // 81: imrpc.ProxyEngineService.VolumeGet:output_type -> imrpc.EngineVolumeGetProxyResponse - 64, // 82: imrpc.ProxyEngineService.VolumeExpand:output_type -> google.protobuf.Empty - 64, // 83: imrpc.ProxyEngineService.VolumeFrontendStart:output_type -> google.protobuf.Empty - 64, // 84: imrpc.ProxyEngineService.VolumeFrontendShutdown:output_type -> google.protobuf.Empty - 64, // 85: imrpc.ProxyEngineService.VolumeUnmapMarkSnapChainRemovedSet:output_type -> google.protobuf.Empty - 64, // 86: imrpc.ProxyEngineService.VolumeSnapshotMaxCountSet:output_type -> google.protobuf.Empty - 64, // 87: imrpc.ProxyEngineService.VolumeSnapshotMaxSizeSet:output_type -> google.protobuf.Empty - 6, // 88: imrpc.ProxyEngineService.VolumeSnapshot:output_type -> imrpc.EngineVolumeSnapshotProxyResponse - 10, // 89: imrpc.ProxyEngineService.SnapshotList:output_type -> imrpc.EngineSnapshotListProxyResponse - 64, // 90: imrpc.ProxyEngineService.SnapshotRevert:output_type -> google.protobuf.Empty - 64, // 91: imrpc.ProxyEngineService.SnapshotPurge:output_type -> google.protobuf.Empty - 14, // 92: imrpc.ProxyEngineService.SnapshotPurgeStatus:output_type -> imrpc.EngineSnapshotPurgeStatusProxyResponse - 64, // 93: imrpc.ProxyEngineService.SnapshotClone:output_type -> google.protobuf.Empty - 16, // 94: imrpc.ProxyEngineService.SnapshotCloneStatus:output_type -> imrpc.EngineSnapshotCloneStatusProxyResponse - 64, // 95: imrpc.ProxyEngineService.SnapshotRemove:output_type -> google.protobuf.Empty - 64, // 96: imrpc.ProxyEngineService.SnapshotHash:output_type -> google.protobuf.Empty - 35, // 97: imrpc.ProxyEngineService.SnapshotHashStatus:output_type -> imrpc.EngineSnapshotHashStatusProxyResponse - 19, // 98: imrpc.ProxyEngineService.SnapshotBackup:output_type -> imrpc.EngineSnapshotBackupProxyResponse - 21, // 99: imrpc.ProxyEngineService.SnapshotBackupStatus:output_type -> imrpc.EngineSnapshotBackupStatusProxyResponse - 23, // 100: imrpc.ProxyEngineService.BackupRestore:output_type -> imrpc.EngineBackupRestoreProxyResponse - 24, // 101: imrpc.ProxyEngineService.BackupRestoreStatus:output_type -> imrpc.EngineBackupRestoreStatusProxyResponse - 64, // 102: imrpc.ProxyEngineService.BackupRestoreFinish:output_type -> google.protobuf.Empty - 64, // 103: imrpc.ProxyEngineService.CleanupBackupMountPoints:output_type -> google.protobuf.Empty - 64, // 104: imrpc.ProxyEngineService.ReplicaAdd:output_type -> google.protobuf.Empty - 28, // 105: imrpc.ProxyEngineService.ReplicaList:output_type -> imrpc.EngineReplicaListProxyResponse - 30, // 
106: imrpc.ProxyEngineService.ReplicaRebuildingStatus:output_type -> imrpc.EngineReplicaRebuildStatusProxyResponse - 64, // 107: imrpc.ProxyEngineService.ReplicaVerifyRebuild:output_type -> google.protobuf.Empty - 64, // 108: imrpc.ProxyEngineService.ReplicaRemove:output_type -> google.protobuf.Empty - 64, // 109: imrpc.ProxyEngineService.ReplicaModeUpdate:output_type -> google.protobuf.Empty - 36, // 110: imrpc.ProxyEngineService.MetricsGet:output_type -> imrpc.EngineMetricsGetProxyResponse - 80, // [80:111] is the sub-list for method output_type - 49, // [49:80] is the sub-list for method input_type + 37, // 80: imrpc.ProxyEngineService.RemountReadOnlyVolume:input_type -> imrpc.RemountVolumeRequest + 1, // 81: imrpc.ProxyEngineService.ServerVersionGet:output_type -> imrpc.EngineVersionProxyResponse + 2, // 82: imrpc.ProxyEngineService.VolumeGet:output_type -> imrpc.EngineVolumeGetProxyResponse + 65, // 83: imrpc.ProxyEngineService.VolumeExpand:output_type -> google.protobuf.Empty + 65, // 84: imrpc.ProxyEngineService.VolumeFrontendStart:output_type -> google.protobuf.Empty + 65, // 85: imrpc.ProxyEngineService.VolumeFrontendShutdown:output_type -> google.protobuf.Empty + 65, // 86: imrpc.ProxyEngineService.VolumeUnmapMarkSnapChainRemovedSet:output_type -> google.protobuf.Empty + 65, // 87: imrpc.ProxyEngineService.VolumeSnapshotMaxCountSet:output_type -> google.protobuf.Empty + 65, // 88: imrpc.ProxyEngineService.VolumeSnapshotMaxSizeSet:output_type -> google.protobuf.Empty + 6, // 89: imrpc.ProxyEngineService.VolumeSnapshot:output_type -> imrpc.EngineVolumeSnapshotProxyResponse + 10, // 90: imrpc.ProxyEngineService.SnapshotList:output_type -> imrpc.EngineSnapshotListProxyResponse + 65, // 91: imrpc.ProxyEngineService.SnapshotRevert:output_type -> google.protobuf.Empty + 65, // 92: imrpc.ProxyEngineService.SnapshotPurge:output_type -> google.protobuf.Empty + 14, // 93: imrpc.ProxyEngineService.SnapshotPurgeStatus:output_type -> imrpc.EngineSnapshotPurgeStatusProxyResponse + 65, // 94: imrpc.ProxyEngineService.SnapshotClone:output_type -> google.protobuf.Empty + 16, // 95: imrpc.ProxyEngineService.SnapshotCloneStatus:output_type -> imrpc.EngineSnapshotCloneStatusProxyResponse + 65, // 96: imrpc.ProxyEngineService.SnapshotRemove:output_type -> google.protobuf.Empty + 65, // 97: imrpc.ProxyEngineService.SnapshotHash:output_type -> google.protobuf.Empty + 35, // 98: imrpc.ProxyEngineService.SnapshotHashStatus:output_type -> imrpc.EngineSnapshotHashStatusProxyResponse + 19, // 99: imrpc.ProxyEngineService.SnapshotBackup:output_type -> imrpc.EngineSnapshotBackupProxyResponse + 21, // 100: imrpc.ProxyEngineService.SnapshotBackupStatus:output_type -> imrpc.EngineSnapshotBackupStatusProxyResponse + 23, // 101: imrpc.ProxyEngineService.BackupRestore:output_type -> imrpc.EngineBackupRestoreProxyResponse + 24, // 102: imrpc.ProxyEngineService.BackupRestoreStatus:output_type -> imrpc.EngineBackupRestoreStatusProxyResponse + 65, // 103: imrpc.ProxyEngineService.BackupRestoreFinish:output_type -> google.protobuf.Empty + 65, // 104: imrpc.ProxyEngineService.CleanupBackupMountPoints:output_type -> google.protobuf.Empty + 65, // 105: imrpc.ProxyEngineService.ReplicaAdd:output_type -> google.protobuf.Empty + 28, // 106: imrpc.ProxyEngineService.ReplicaList:output_type -> imrpc.EngineReplicaListProxyResponse + 30, // 107: imrpc.ProxyEngineService.ReplicaRebuildingStatus:output_type -> imrpc.EngineReplicaRebuildStatusProxyResponse + 65, // 108: imrpc.ProxyEngineService.ReplicaVerifyRebuild:output_type -> 
google.protobuf.Empty + 65, // 109: imrpc.ProxyEngineService.ReplicaRemove:output_type -> google.protobuf.Empty + 65, // 110: imrpc.ProxyEngineService.ReplicaModeUpdate:output_type -> google.protobuf.Empty + 36, // 111: imrpc.ProxyEngineService.MetricsGet:output_type -> imrpc.EngineMetricsGetProxyResponse + 65, // 112: imrpc.ProxyEngineService.RemountReadOnlyVolume:output_type -> google.protobuf.Empty + 81, // [81:113] is the sub-list for method output_type + 49, // [49:81] is the sub-list for method input_type 49, // [49:49] is the sub-list for extension type_name 49, // [49:49] is the sub-list for extension extendee 0, // [0:49] is the sub-list for field type_name @@ -3680,6 +3738,18 @@ func file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_in return nil } } + file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemountVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -3687,7 +3757,7 @@ func file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_in GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_longhorn_longhorn_instance_manager_pkg_imrpc_proxy_proto_rawDesc, NumEnums: 0, - NumMessages: 46, + NumMessages: 47, NumExtensions: 0, NumServices: 1, }, @@ -3744,6 +3814,7 @@ type ProxyEngineServiceClient interface { ReplicaRemove(ctx context.Context, in *EngineReplicaRemoveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) ReplicaModeUpdate(ctx context.Context, in *EngineReplicaModeUpdateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) MetricsGet(ctx context.Context, in *ProxyEngineRequest, opts ...grpc.CallOption) (*EngineMetricsGetProxyResponse, error) + RemountReadOnlyVolume(ctx context.Context, in *RemountVolumeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) } type proxyEngineServiceClient struct { @@ -4033,6 +4104,15 @@ func (c *proxyEngineServiceClient) MetricsGet(ctx context.Context, in *ProxyEngi return out, nil } +func (c *proxyEngineServiceClient) RemountReadOnlyVolume(ctx context.Context, in *RemountVolumeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/imrpc.ProxyEngineService/RemountReadOnlyVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ProxyEngineServiceServer is the server API for ProxyEngineService service. type ProxyEngineServiceServer interface { ServerVersionGet(context.Context, *ProxyEngineRequest) (*EngineVersionProxyResponse, error) @@ -4066,6 +4146,7 @@ type ProxyEngineServiceServer interface { ReplicaRemove(context.Context, *EngineReplicaRemoveRequest) (*emptypb.Empty, error) ReplicaModeUpdate(context.Context, *EngineReplicaModeUpdateRequest) (*emptypb.Empty, error) MetricsGet(context.Context, *ProxyEngineRequest) (*EngineMetricsGetProxyResponse, error) + RemountReadOnlyVolume(context.Context, *RemountVolumeRequest) (*emptypb.Empty, error) } // UnimplementedProxyEngineServiceServer can be embedded to have forward compatible implementations. 
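For reference, a minimal sketch of how the newly generated RemountReadOnlyVolume RPC could be exercised against an instance-manager proxy. The constructor name NewProxyEngineServiceClient follows the usual protoc-gen-go output, and the address, timeout, and volume name are purely illustrative; this is not code shipped by this change.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/longhorn/longhorn-instance-manager/pkg/imrpc"
)

func main() {
	// Address, timeout, and volume name are illustrative; adjust to the actual proxy endpoint.
	conn, err := grpc.Dial("localhost:8501", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("failed to dial instance-manager proxy: %v", err)
	}
	defer conn.Close()

	client := imrpc.NewProxyEngineServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// RemountVolumeRequest carries only the volume name (volume_name in proxy.proto).
	if _, err := client.RemountReadOnlyVolume(ctx, &imrpc.RemountVolumeRequest{VolumeName: "pvc-example"}); err != nil {
		log.Fatalf("RemountReadOnlyVolume failed: %v", err)
	}
}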
@@ -4165,6 +4246,9 @@ func (*UnimplementedProxyEngineServiceServer) ReplicaModeUpdate(context.Context, func (*UnimplementedProxyEngineServiceServer) MetricsGet(context.Context, *ProxyEngineRequest) (*EngineMetricsGetProxyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method MetricsGet not implemented") } +func (*UnimplementedProxyEngineServiceServer) RemountReadOnlyVolume(context.Context, *RemountVolumeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemountReadOnlyVolume not implemented") +} func RegisterProxyEngineServiceServer(s *grpc.Server, srv ProxyEngineServiceServer) { s.RegisterService(&_ProxyEngineService_serviceDesc, srv) @@ -4728,6 +4812,24 @@ func _ProxyEngineService_MetricsGet_Handler(srv interface{}, ctx context.Context return interceptor(ctx, in, info, handler) } +func _ProxyEngineService_RemountReadOnlyVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemountVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProxyEngineServiceServer).RemountReadOnlyVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/imrpc.ProxyEngineService/RemountReadOnlyVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProxyEngineServiceServer).RemountReadOnlyVolume(ctx, req.(*RemountVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _ProxyEngineService_serviceDesc = grpc.ServiceDesc{ ServiceName: "imrpc.ProxyEngineService", HandlerType: (*ProxyEngineServiceServer)(nil), @@ -4856,6 +4958,10 @@ var _ProxyEngineService_serviceDesc = grpc.ServiceDesc{ MethodName: "MetricsGet", Handler: _ProxyEngineService_MetricsGet_Handler, }, + { + MethodName: "RemountReadOnlyVolume", + Handler: _ProxyEngineService_RemountReadOnlyVolume_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.proto", diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.proto b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.proto index 65ef749d4ec..f4391ce64e6 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.proto +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/imrpc/proxy.proto @@ -47,6 +47,7 @@ service ProxyEngineService { rpc ReplicaModeUpdate(EngineReplicaModeUpdateRequest) returns (google.protobuf.Empty); rpc MetricsGet(ProxyEngineRequest) returns (EngineMetricsGetProxyResponse); + rpc RemountReadOnlyVolume(RemountVolumeRequest) returns (google.protobuf.Empty); } message ProxyEngineRequest{ @@ -293,3 +294,7 @@ message EngineSnapshotHashStatusProxyResponse { message EngineMetricsGetProxyResponse { ptypes.Metrics metrics = 1; } + +message RemountVolumeRequest { + string volume_name = 1; +} diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/meta/version.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/meta/version.go index af5feeed6eb..39e06029134 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/meta/version.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/meta/version.go @@ -2,7 +2,7 @@ package meta const ( // InstanceManagerAPIVersion is used for compatibility check for longhorn-manager - InstanceManagerAPIVersion = 5 + InstanceManagerAPIVersion = 6 
InstanceManagerAPIMinVersion = 1 // InstanceManagerProxyAPIVersion is used for compatibility check for longhorn-manager diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/types/types.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/types/types.go index 493b4f5ab6c..28b5bba2862 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/types/types.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/types/types.go @@ -44,7 +44,5 @@ const ( ) const ( - GlobalMountPathPattern = "/host/var/lib/kubelet/plugins/kubernetes.io/csi/driver.longhorn.io/*/globalmount" - EngineConditionFilesystemReadOnly = "FilesystemReadOnly" ) diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/grpcutil.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/grpcutil.go index 0a98d30f609..3e78667a452 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/grpcutil.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/grpcutil.go @@ -74,7 +74,7 @@ func NewServer(endpoint string, tlsConfig *tls.Config, opts ...grpc.ServerOption } // ServerTLS prepares the TLS configuration needed for a server with given -// encoded certficate and private key. +// encoded certificate and private key. func ServerTLS(caCert, cert, key []byte, peerName string) (*tls.Config, error) { certPool := x509.NewCertPool() if ok := certPool.AppendCertsFromPEM(caCert); !ok { diff --git a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/util.go b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/util.go index e3a64c0ff95..d7b92493a98 100644 --- a/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/util.go +++ b/vendor/github.com/longhorn/longhorn-instance-manager/pkg/util/util.go @@ -7,7 +7,7 @@ import ( "net" "os" "os/exec" - "path" + "regexp" "strconv" "strings" "time" @@ -18,8 +18,6 @@ import ( "k8s.io/mount-utils" spdkhelpertypes "github.com/longhorn/go-spdk-helper/pkg/types" - - "github.com/longhorn/longhorn-instance-manager/pkg/types" ) const ( @@ -158,12 +156,10 @@ func GetVolumeMountPointMap() (map[string]mount.MountPoint, error) { return nil, err } + regex := regexp.MustCompile(`.*/globalmount$`) + for _, mp := range mountPoints { - match, err := path.Match(types.GlobalMountPathPattern, mp.Path) - if err != nil { - return nil, err - } - if match { + if regex.MatchString(mp.Path) { volumeNameSHAStr := GetVolumeNameSHAStrFromPath(mp.Path) volumeMountPointMap[volumeNameSHAStr] = mp } diff --git a/vendor/github.com/longhorn/longhorn-manager/datastore/longhorn.go b/vendor/github.com/longhorn/longhorn-manager/datastore/longhorn.go index 34b84dc8934..a3375884409 100644 --- a/vendor/github.com/longhorn/longhorn-manager/datastore/longhorn.go +++ b/vendor/github.com/longhorn/longhorn-manager/datastore/longhorn.go @@ -54,7 +54,7 @@ func (s *DataStore) UpdateCustomizedSettings(defaultImages map[types.SettingName return err } - availableCustomizedDefaultSettings := s.filterCustomizedDefaultSettings(customizedDefaultSettings) + availableCustomizedDefaultSettings := s.filterCustomizedDefaultSettings(customizedDefaultSettings, defaultSettingCM.ResourceVersion) if err := s.applyCustomizedDefaultSettingsToDefinitions(availableCustomizedDefaultSettings); err != nil { return err @@ -96,11 +96,11 @@ func (s *DataStore) createNonExistingSettingCRsWithDefaultSetting(configMapResou return nil } -func (s *DataStore) filterCustomizedDefaultSettings(customizedDefaultSettings map[string]string) map[string]string 
{ +func (s *DataStore) filterCustomizedDefaultSettings(customizedDefaultSettings map[string]string, defaultSettingCMResourceVersion string) map[string]string { availableCustomizedDefaultSettings := make(map[string]string) for name, value := range customizedDefaultSettings { - if !s.isSettingValueChanged(types.SettingName(name), value) { + if !s.shouldApplyCustomizedSettingValue(types.SettingName(name), value, defaultSettingCMResourceVersion) { continue } @@ -114,8 +114,7 @@ func (s *DataStore) filterCustomizedDefaultSettings(customizedDefaultSettings ma return availableCustomizedDefaultSettings } -// isSettingValueChanged check if the customized default setting and value was changed -func (s *DataStore) isSettingValueChanged(name types.SettingName, value string) bool { +func (s *DataStore) shouldApplyCustomizedSettingValue(name types.SettingName, value string, defaultSettingCMResourceVersion string) bool { setting, err := s.GetSettingExactRO(name) if err != nil { if !ErrorIsNotFound(err) { @@ -124,10 +123,20 @@ func (s *DataStore) isSettingValueChanged(name types.SettingName, value string) } return true } - if setting.Value == value { - return false + + configMapResourceVersion := "" + if setting.Annotations != nil { + configMapResourceVersion = setting.Annotations[types.GetLonghornLabelKey(types.ConfigMapResourceVersionKey)] } - return true + + // Also check the setting definition. + definition, ok := types.GetSettingDefinition(name) + if !ok { + logrus.Errorf("customized default setting %v is not defined", name) + return true + } + + return (setting.Value != value || configMapResourceVersion != defaultSettingCMResourceVersion || definition.Default != value) } func (s *DataStore) syncSettingsWithDefaultImages(defaultImages map[types.SettingName]string) error { @@ -197,16 +206,6 @@ func (s *DataStore) applyCustomizedDefaultSettingsToDefinitions(customizedDefaul func (s *DataStore) syncSettingCRsWithCustomizedDefaultSettings(customizedDefaultSettings map[string]string, defaultSettingCMResourceVersion string) error { for _, sName := range types.SettingNameList { - configMapResourceVersion := "" - if s, err := s.GetSettingExactRO(sName); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } else { - if s.Annotations != nil { - configMapResourceVersion = s.Annotations[types.GetLonghornLabelKey(types.ConfigMapResourceVersionKey)] - } - } definition, ok := types.GetSettingDefinition(sName) if !ok { @@ -218,13 +217,12 @@ func (s *DataStore) syncSettingCRsWithCustomizedDefaultSettings(customizedDefaul continue } - if configMapResourceVersion != defaultSettingCMResourceVersion { - if definition.Required && value == "" { - continue - } - if err := s.createOrUpdateSetting(sName, value, defaultSettingCMResourceVersion); err != nil { - return err - } + if definition.Required && value == "" { + continue + } + + if err := s.createOrUpdateSetting(sName, value, defaultSettingCMResourceVersion); err != nil { + return err } } @@ -1079,8 +1077,7 @@ func GetNewCurrentEngineAndExtras(v *longhorn.Volume, es map[string]*longhorn.En oldEngineName = e.Name } if (v.Spec.NodeID != "" && v.Spec.NodeID == e.Spec.NodeID) || - (v.Status.CurrentNodeID != "" && v.Status.CurrentNodeID == e.Spec.NodeID) || - (v.Status.PendingNodeID != "" && v.Status.PendingNodeID == e.Spec.NodeID) { + (v.Status.CurrentNodeID != "" && v.Status.CurrentNodeID == e.Spec.NodeID) { if currentEngine != nil { return nil, nil, fmt.Errorf("BUG: found the second new active engine %v besides %v", e.Name, currentEngine.Name) } @@ -3052,6 
+3049,7 @@ func (s *DataStore) ResetMonitoringEngineStatus(e *longhorn.Engine) (*longhorn.E e.Status.Endpoint = "" e.Status.LastRestoredBackup = "" e.Status.ReplicaModeMap = nil + e.Status.ReplicaTransitionTimeMap = nil e.Status.RestoreStatus = nil e.Status.PurgeStatus = nil e.Status.RebuildStatus = nil @@ -4944,7 +4942,7 @@ func (s *DataStore) CreateBackupBackingImage(backupBackingImage *longhorn.Backup } obj, err := verifyCreation(ret.Name, "backup backing image", func(name string) (runtime.Object, error) { - return s.getBackupBackingImageRO(name) + return s.GetBackupBackingImageRO(name) }) if err != nil { return nil, err @@ -4964,7 +4962,7 @@ func (s *DataStore) UpdateBackupBackingImage(backupBackingImage *longhorn.Backup return nil, err } verifyUpdate(backupBackingImage.Name, obj, func(name string) (runtime.Object, error) { - return s.getBackupBackingImageRO(name) + return s.GetBackupBackingImageRO(name) }) return obj, nil } @@ -4977,7 +4975,7 @@ func (s *DataStore) UpdateBackupBackingImageStatus(backupBackingImage *longhorn. return nil, err } verifyUpdate(backupBackingImage.Name, obj, func(name string) (runtime.Object, error) { - return s.getBackupBackingImageRO(name) + return s.GetBackupBackingImageRO(name) }) return obj, nil } @@ -5008,14 +5006,14 @@ func (s *DataStore) RemoveFinalizerForBackupBackingImage(obj *longhorn.BackupBac return nil } -func (s *DataStore) getBackupBackingImageRO(name string) (*longhorn.BackupBackingImage, error) { +func (s *DataStore) GetBackupBackingImageRO(name string) (*longhorn.BackupBackingImage, error) { return s.backupBackingImageLister.BackupBackingImages(s.namespace).Get(name) } // GetBackupBackingImage returns a new BackupBackingImage object for the given name and // namespace func (s *DataStore) GetBackupBackingImage(name string) (*longhorn.BackupBackingImage, error) { - resultRO, err := s.getBackupBackingImageRO(name) + resultRO, err := s.GetBackupBackingImageRO(name) if err != nil { return nil, err } @@ -5038,3 +5036,7 @@ func (s *DataStore) ListBackupBackingImages() (map[string]*longhorn.BackupBackin } return itemMap, nil } + +func (s *DataStore) ListBackupBackingImagesRO() ([]*longhorn.BackupBackingImage, error) { + return s.backupBackingImageLister.BackupBackingImages(s.namespace).List(labels.Everything()) +} diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/backing_image_manager.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/backing_image_manager.go index 3b7701788c0..539a3b93b0e 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/backing_image_manager.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/backing_image_manager.go @@ -54,9 +54,10 @@ func (c *BackingImageManagerClient) parseBackingImageFileInfo(bi *bimapi.Backing return nil } return &longhorn.BackingImageFileInfo{ - Name: bi.Name, - UUID: bi.UUID, - Size: bi.Size, + Name: bi.Name, + UUID: bi.UUID, + Size: bi.Size, + VirtualSize: bi.VirtualSize, State: longhorn.BackingImageState(bi.Status.State), CurrentChecksum: bi.Status.CurrentChecksum, diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/backups.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/backups.go index 9bece398afc..10d21929b60 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/backups.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/backups.go @@ -167,7 +167,7 @@ func parseBackupVolumeNamesList(output string) ([]string, error) { } // BackupVolumeNameList returns a list of backup volume names -func (btc 
*BackupTargetClient) BackupVolumeNameList(destURL string, credential map[string]string) ([]string, error) { +func (btc *BackupTargetClient) BackupVolumeNameList() ([]string, error) { output, err := btc.ExecuteEngineBinary("backup", "ls", "--volume-only", btc.URL) if err != nil { if types.ErrorIsNotFound(err) { diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/engine.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/engine.go index 062970f6019..f4ba0196c91 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/engine.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/engine.go @@ -352,6 +352,10 @@ func (e *EngineBinary) MetricsGet(*longhorn.Engine) (*Metrics, error) { return nil, fmt.Errorf(ErrNotImplement) } +func (e *EngineBinary) RemountReadOnlyVolume(*longhorn.Engine) error { + return fmt.Errorf(ErrNotImplement) +} + // addFlags always adds required flags to args. In addition, if the engine version is high enough, it adds additional // engine identity validation flags. func (e *EngineBinary) addFlags(args []string) ([]string, error) { diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/enginesim.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/enginesim.go index 36ab0d06bca..bca0757d580 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/enginesim.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/enginesim.go @@ -190,7 +190,7 @@ func (e *EngineSimulator) SnapshotPurgeStatus(*longhorn.Engine) (map[string]*lon return nil, fmt.Errorf(ErrNotImplement) } -func (e *EngineSimulator) SnapshotBackup(engine *longhorn.Engine, backupName, snapName, backupTarget, +func (e *EngineSimulator) SnapshotBackup(engine *longhorn.Engine, snapshotName, backupName, backupTarget, backingImageName, backingImageChecksum, compressionMethod string, concurrentLimit int, storageClassName string, labels, credential map[string]string) (string, string, error) { return "", "", fmt.Errorf(ErrNotImplement) @@ -276,3 +276,7 @@ func (e *EngineSimulator) MetricsGet(*longhorn.Engine) (*Metrics, error) { func (e *EngineSimulator) CleanupBackupMountPoints() error { return fmt.Errorf(ErrNotImplement) } + +func (e *EngineSimulator) RemountReadOnlyVolume(*longhorn.Engine) error { + return fmt.Errorf(ErrNotImplement) +} diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/instance_manager.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/instance_manager.go index dd708f862c3..7d6e1a134ec 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/instance_manager.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/instance_manager.go @@ -21,7 +21,7 @@ import ( ) const ( - CurrentInstanceManagerAPIVersion = 5 + CurrentInstanceManagerAPIVersion = 6 MinInstanceManagerAPIVersion = 1 UnknownInstanceManagerAPIVersion = 0 @@ -784,3 +784,27 @@ func (c *InstanceManagerClient) VersionGet() (int, int, int, int, error) { return output.InstanceManagerAPIMinVersion, output.InstanceManagerAPIVersion, output.InstanceManagerProxyAPIMinVersion, output.InstanceManagerProxyAPIVersion, nil } + +func (c *InstanceManagerClient) LogSetLevel(dataEngine longhorn.DataEngineType, component, level string) error { + if err := CheckInstanceManagerCompatibility(c.apiMinVersion, c.apiVersion); err != nil { + return err + } + + if c.GetAPIVersion() < 6 { + return nil + } + + return c.instanceServiceGrpcClient.LogSetLevel(string(dataEngine), component, level) +} + +func (c *InstanceManagerClient) LogSetFlags(dataEngine 
longhorn.DataEngineType, component, flags string) error { + if err := CheckInstanceManagerCompatibility(c.apiMinVersion, c.apiVersion); err != nil { + return err + } + + if c.GetAPIVersion() < 6 { + return nil + } + + return c.instanceServiceGrpcClient.LogSetFlags(string(dataEngine), component, flags) +} diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/proxy_volume.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/proxy_volume.go index 4b49293194b..d7375377382 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/proxy_volume.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/proxy_volume.go @@ -53,3 +53,7 @@ func (p *Proxy) VolumeSnapshotMaxSizeSet(e *longhorn.Engine) error { return p.grpcClient.VolumeSnapshotMaxSizeSet(string(e.Spec.DataEngine), e.Name, e.Spec.VolumeName, p.DirectToURL(e), e.Spec.SnapshotMaxSize) } + +func (p *Proxy) RemountReadOnlyVolume(e *longhorn.Engine) error { + return p.grpcClient.RemountReadOnlyVolume(e.Spec.VolumeName) +} diff --git a/vendor/github.com/longhorn/longhorn-manager/engineapi/types.go b/vendor/github.com/longhorn/longhorn-manager/engineapi/types.go index a57efecf362..63d6be8a4f7 100644 --- a/vendor/github.com/longhorn/longhorn-manager/engineapi/types.go +++ b/vendor/github.com/longhorn/longhorn-manager/engineapi/types.go @@ -98,7 +98,7 @@ type EngineClient interface { SnapshotRevert(engine *longhorn.Engine, name string) error SnapshotPurge(engine *longhorn.Engine) error SnapshotPurgeStatus(engine *longhorn.Engine) (map[string]*longhorn.PurgeStatus, error) - SnapshotBackup(engine *longhorn.Engine, backupName, snapName, backupTarget, backingImageName, backingImageChecksum, compressionMethod string, concurrentLimit int, storageClassName string, labels, credential map[string]string) (string, string, error) + SnapshotBackup(engine *longhorn.Engine, snapshotName, backupName, backupTarget, backingImageName, backingImageChecksum, compressionMethod string, concurrentLimit int, storageClassName string, labels, credential map[string]string) (string, string, error) SnapshotBackupStatus(engine *longhorn.Engine, backupName, replicaAddress, replicaName string) (*longhorn.EngineBackupStatus, error) SnapshotCloneStatus(engine *longhorn.Engine) (map[string]*longhorn.SnapshotCloneStatus, error) SnapshotClone(engine *longhorn.Engine, snapshotName, fromEngineAddress, fromVolumeName, fromEngineName string, fileSyncHTTPClientTimeout int64) error @@ -111,6 +111,7 @@ type EngineClient interface { CleanupBackupMountPoints() error MetricsGet(engine *longhorn.Engine) (*Metrics, error) + RemountReadOnlyVolume(engine *longhorn.Engine) error } type EngineClientRequest struct { diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimage.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimage.go index 15cf0c12a3e..a68fc3e2d49 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimage.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimage.go @@ -56,6 +56,9 @@ type BackingImageStatus struct { UUID string `json:"uuid"` // +optional Size int64 `json:"size"` + // Virtual size of image, which may be larger than physical size. Will be zero until known (e.g. 
while a backing image is uploading) + // +optional + VirtualSize int64 `json:"virtualSize"` // +optional Checksum string `json:"checksum"` // +optional @@ -74,6 +77,7 @@ type BackingImageStatus struct { // +kubebuilder:printcolumn:name="UUID",type=string,JSONPath=`.status.uuid`,description="The system generated UUID" // +kubebuilder:printcolumn:name="SourceType",type=string,JSONPath=`.spec.sourceType`,description="The source of the backing image file data" // +kubebuilder:printcolumn:name="Size",type=string,JSONPath=`.status.size`,description="The backing image file size in each disk" +// +kubebuilder:printcolumn:name="VirtualSize",type=string,JSONPath=`.status.virtualSize`,description="The virtual size of the image (may be larger than file size)" // +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` // BackingImage is where Longhorn stores backing image object. diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimagemanager.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimagemanager.go index a7dfa00073e..303b2f81aee 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimagemanager.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/backingimagemanager.go @@ -20,6 +20,8 @@ type BackingImageFileInfo struct { // +optional Size int64 `json:"size"` // +optional + VirtualSize int64 `json:"virtualSize"` + // +optional State BackingImageState `json:"state"` // +optional CurrentChecksum string `json:"currentChecksum"` diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/engine.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/engine.go index 797a6676deb..838aa2ea2db 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/engine.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/engine.go @@ -158,6 +158,11 @@ type EngineStatus struct { // +nullable ReplicaModeMap map[string]ReplicaMode `json:"replicaModeMap"` // +optional + // ReplicaTransitionTimeMap records the time a replica in ReplicaModeMap transitions from one mode to another (or + // from not being in the ReplicaModeMap to being in it). This information is sometimes required by other controllers + // (e.g. the volume controller uses it to determine the correct value for replica.Spec.lastHealthyAt). 
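A standalone sketch (not part of this change) of how a controller could keep the new ReplicaTransitionTimeMap in step with ReplicaModeMap. The helper name recordTransition and the RFC3339 timestamp format are assumptions made for illustration only.

package main

import (
	"fmt"
	"time"

	longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
)

// recordTransition is a hypothetical helper: whenever a replica first appears in the
// mode map or changes mode, the transition time is stamped alongside it.
func recordTransition(status *longhorn.EngineStatus, replica string, mode longhorn.ReplicaMode, now time.Time) {
	if status.ReplicaModeMap == nil {
		status.ReplicaModeMap = map[string]longhorn.ReplicaMode{}
	}
	if status.ReplicaTransitionTimeMap == nil {
		status.ReplicaTransitionTimeMap = map[string]string{}
	}
	if old, ok := status.ReplicaModeMap[replica]; !ok || old != mode {
		status.ReplicaTransitionTimeMap[replica] = now.UTC().Format(time.RFC3339)
	}
	status.ReplicaModeMap[replica] = mode
}

func main() {
	status := &longhorn.EngineStatus{}
	recordTransition(status, "replica-a", longhorn.ReplicaModeRW, time.Now())
	fmt.Println(status.ReplicaTransitionTimeMap["replica-a"])
}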
+ ReplicaTransitionTimeMap map[string]string `json:"replicaTransitionTimeMap"` + // +optional Endpoint string `json:"endpoint"` // +optional LastRestoredBackup string `json:"lastRestoredBackup"` diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/instancemanager.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/instancemanager.go index 18948686aa9..c3ef1c9ce5f 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/instancemanager.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/instancemanager.go @@ -124,6 +124,7 @@ type InstanceProcessStatus struct { // +optional ErrorMsg string `json:"errorMsg"` //+optional + //+nullable Conditions map[string]bool `json:"conditions"` // +optional Listen string `json:"listen"` diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/replica.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/replica.go index bea53703db6..e676f3028f9 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/replica.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/replica.go @@ -25,10 +25,37 @@ type ReplicaSpec struct { // +optional EngineName string `json:"engineName"` // +optional + // HealthyAt is set the first time a replica becomes read/write in an engine after creation or rebuild. HealthyAt + // indicates the time the last successful rebuild occurred. When HealthyAt is set, a replica is likely to have + // useful (though possibly stale) data. HealthyAt is cleared before a rebuild. HealthyAt may be later than the + // corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller + // acknowledges the change. HealthyAt string `json:"healthyAt"` // +optional + // LastHealthyAt is set every time a replica becomes read/write in an engine. Unlike HealthyAt, LastHealthyAt is + // never cleared. LastHealthyAt is not a reliable indicator of the state of a replica's data. For example, a + // replica with LastHealthyAt set may be in the middle of a rebuild. However, because it is never cleared, it can be + // compared to LastFailedAt to help prevent dangerous replica deletion in some corner cases. LastHealthyAt may be + // later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume + // controller acknowledges the change. + LastHealthyAt string `json:"lastHealthyAt"` + // +optional + // FailedAt is set when a running replica fails or when a running engine is unable to use a replica for any reason. + // FailedAt indicates the time the failure occurred. When FailedAt is set, a replica is likely to have useful + // (though possibly stale) data. A replica with FailedAt set must be rebuilt from a non-failed replica (or it can + // be used in a salvage if all replicas are failed). FailedAt is cleared before a rebuild or salvage. FailedAt may + // be later than the corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume + // controller acknowledges the change. FailedAt string `json:"failedAt"` // +optional + // LastFailedAt is always set at the same time as FailedAt. Unlike FailedAt, LastFailedAt is never cleared. + // LastFailedAt is not a reliable indicator of the state of a replica's data. For example, a replica with + // LastFailedAt may already be healthy and in use again. 
However, because it is never cleared, it can be compared to + // LastHealthyAt to help prevent dangerous replica deletion in some corner cases. LastFailedAt may be later than the + // corresponding entry in an engine's replicaTransitionTimeMap because it is set when the volume controller + // acknowledges the change. + LastFailedAt string `json:"lastFailedAt"` + // +optional DiskID string `json:"diskID"` // +optional DiskPath string `json:"diskPath"` diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/volumeattachment.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/volumeattachment.go index 2cf935c9743..b64242bf80c 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/volumeattachment.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/volumeattachment.go @@ -123,6 +123,18 @@ func GetAttachmentTicketID(attacherType AttacherType, id string) string { return retID } +func GetNodeIdOfAttachmentTicket(attachmentID string, va *VolumeAttachment) string { + if va == nil { + return "" + } + attachmentTicket, ok := va.Spec.AttachmentTickets[attachmentID] + if !ok { + return "" + } + + return attachmentTicket.NodeID +} + func IsAttachmentTicketSatisfied(attachmentID string, va *VolumeAttachment) bool { if va == nil { return false diff --git a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go index 3810cd2cedc..48cd18dada4 100644 --- a/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go +++ b/vendor/github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2/zz_generated.deepcopy.go @@ -1184,6 +1184,13 @@ func (in *EngineStatus) DeepCopyInto(out *EngineStatus) { (*out)[key] = val } } + if in.ReplicaTransitionTimeMap != nil { + in, out := &in.ReplicaTransitionTimeMap, &out.ReplicaTransitionTimeMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.BackupStatus != nil { in, out := &in.BackupStatus, &out.BackupStatus *out = make(map[string]*EngineBackupStatus, len(*in)) diff --git a/vendor/github.com/longhorn/longhorn-manager/manager/backupbackingimage.go b/vendor/github.com/longhorn/longhorn-manager/manager/backupbackingimage.go new file mode 100644 index 00000000000..fb8c92d4267 --- /dev/null +++ b/vendor/github.com/longhorn/longhorn-manager/manager/backupbackingimage.go @@ -0,0 +1,70 @@ +package manager + +import ( + longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/longhorn/longhorn-manager/util" + "github.com/pkg/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (m *VolumeManager) ListBackupBackingImagesSorted() ([]*longhorn.BackupBackingImage, error) { + backupBackingImageMap, err := m.ds.ListBackupBackingImages() + if err != nil { + return []*longhorn.BackupBackingImage{}, err + } + + backupBackingImageNames, err := util.SortKeys(backupBackingImageMap) + if err != nil { + return []*longhorn.BackupBackingImage{}, err + } + + backupBackingImages := make([]*longhorn.BackupBackingImage, len(backupBackingImageMap)) + for i, backupBackingImageName := range backupBackingImageNames { + backupBackingImages[i] = backupBackingImageMap[backupBackingImageName] + } + + return backupBackingImages, nil +} + +func (m 
*VolumeManager) GetBackupBackingImage(name string) (*longhorn.BackupBackingImage, error) { + return m.ds.GetBackupBackingImageRO(name) +} + +func (m *VolumeManager) DeleteBackupBackingImage(name string) error { + return m.ds.DeleteBackupBackingImage(name) +} + +func (m *VolumeManager) RestoreBackupBackingImage(name string) error { + return m.restoreBackingImage(name) +} + +func (m *VolumeManager) CreateBackupBackingImage(name string) error { + _, err := m.ds.GetBackingImageRO(name) + if err != nil { + return errors.Wrapf(err, "failed to get backing image %v", name) + } + + backupBackingImage, err := m.ds.GetBackupBackingImageRO(name) + if err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to check if backup backing image %v exists", name) + } + } + + if backupBackingImage == nil { + backupBackingImage := &longhorn.BackupBackingImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: longhorn.BackupBackingImageSpec{ + UserCreated: true, + }, + } + if _, err = m.ds.CreateBackupBackingImage(backupBackingImage); err != nil && !apierrors.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed to create backup backing image %s in the cluster", name) + } + } + + return nil +} diff --git a/vendor/github.com/longhorn/longhorn-manager/manager/volume.go b/vendor/github.com/longhorn/longhorn-manager/manager/volume.go index 1612b0c4426..5ae12601acd 100644 --- a/vendor/github.com/longhorn/longhorn-manager/manager/volume.go +++ b/vendor/github.com/longhorn/longhorn-manager/manager/volume.go @@ -431,6 +431,12 @@ func (m *VolumeManager) triggerBackupVolumeToSync(volume *longhorn.Volume) error backupVolume, err := m.ds.GetBackupVolume(backupVolumeName) if err != nil { + // The backup volume may be deleted already. + // hence it's better not to block the caller to continue the handlings like DR volume activation. 
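As a self-contained illustration of the not-found tolerance added to triggerBackupVolumeToSync in the surrounding hunk, the sketch below swallows NotFound and only wraps other errors; getBackupVolume is a hypothetical stand-in for the datastore lookup and the resource names are illustrative.

package main

import (
	"fmt"

	"github.com/pkg/errors"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// getBackupVolume is a hypothetical stand-in for the datastore lookup; here it always
// reports that the object is gone.
func getBackupVolume(name string) (interface{}, error) {
	return nil, apierrors.NewNotFound(schema.GroupResource{Group: "longhorn.io", Resource: "backupvolumes"}, name)
}

// triggerSync skips silently when the backup volume has already been deleted and only
// propagates other lookup errors, mirroring the behaviour added above.
func triggerSync(name string) error {
	if _, err := getBackupVolume(name); err != nil {
		if apierrors.IsNotFound(err) {
			fmt.Printf("Cannot find backup volume %v to trigger the sync-up, will skip it\n", name)
			return nil
		}
		return errors.Wrapf(err, "failed to get backup volume: %v", name)
	}
	// ...otherwise update requestSyncTime on the backup volume...
	return nil
}

func main() {
	fmt.Println(triggerSync("bv-example")) // prints the skip message, then <nil>
}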
+ if apierrors.IsNotFound(err) { + logrus.Infof("Cannot find backup volume %v to trigger the sync-up, will skip it", backupVolumeName) + return nil + } return errors.Wrapf(err, "failed to get backup volume: %v", backupVolumeName) } requestSyncTime := metav1.Time{Time: time.Now().UTC()} @@ -789,7 +795,7 @@ func (m *VolumeManager) EngineUpgrade(volumeName, image string) (v *longhorn.Vol return nil, fmt.Errorf("cannot upgrade engine image for volume %v from image %v to image %v because the volume's current engine image %v is not deployed on the replicas' nodes or the node that the volume is attached to", v.Name, v.Spec.Image, image, v.Status.CurrentImage) } - if v.Spec.MigrationNodeID != "" { + if util.IsVolumeMigrating(v) { return nil, fmt.Errorf("cannot upgrade during migration") } @@ -1183,7 +1189,7 @@ func (m *VolumeManager) restoreBackingImage(biName string) error { if biName == "" { return nil } - bi, err := m.ds.GetBackingImage(biName) + bi, err := m.ds.GetBackingImageRO(biName) if err != nil { if !apierrors.IsNotFound(err) { return errors.Wrapf(err, "failed to get backing image %v", biName) @@ -1195,7 +1201,7 @@ func (m *VolumeManager) restoreBackingImage(biName string) error { } // try find the backup backing image - bbi, err := m.ds.GetBackupBackingImage(biName) + bbi, err := m.ds.GetBackupBackingImageRO(biName) if err != nil { return errors.Wrapf(err, "failed to get backup backing image %v", biName) } diff --git a/vendor/github.com/longhorn/longhorn-manager/scheduler/replica_scheduler.go b/vendor/github.com/longhorn/longhorn-manager/scheduler/replica_scheduler.go index f5cc47786c1..da7f915cfcd 100644 --- a/vendor/github.com/longhorn/longhorn-manager/scheduler/replica_scheduler.go +++ b/vendor/github.com/longhorn/longhorn-manager/scheduler/replica_scheduler.go @@ -20,6 +20,9 @@ const ( type ReplicaScheduler struct { ds *datastore.DataStore + + // Required for unit testing. + nowHandler func() time.Time } type Disk struct { @@ -40,6 +43,9 @@ type DiskSchedulingInfo struct { func NewReplicaScheduler(ds *datastore.DataStore) *ReplicaScheduler { rcScheduler := &ReplicaScheduler{ ds: ds, + + // Required for unit testing. + nowHandler: time.Now, } return rcScheduler } @@ -88,7 +94,7 @@ func (rcs *ReplicaScheduler) ScheduleReplica(replica *longhorn.Replica, replicas nodeDisksMap[node.Name] = disks } - diskCandidates, multiError := rcs.getDiskCandidates(nodeCandidates, nodeDisksMap, replicas, volume, true) + diskCandidates, multiError := rcs.getDiskCandidates(nodeCandidates, nodeDisksMap, replicas, volume, true, false) // there's no disk that fit for current replica if len(diskCandidates) == 0 { @@ -154,7 +160,17 @@ func getNodesWithEvictingReplicas(replicas map[string]*longhorn.Replica, nodeInf return nodesWithEvictingReplicas } -func (rcs *ReplicaScheduler) getDiskCandidates(nodeInfo map[string]*longhorn.Node, nodeDisksMap map[string]map[string]struct{}, replicas map[string]*longhorn.Replica, volume *longhorn.Volume, requireSchedulingCheck bool) (map[string]*Disk, util.MultiError) { +// getDiskCandidates returns a map of the most appropriate disks a replica can be scheduled to (assuming it can be +// scheduled at all). For example, consider a case in which there are two disks on nodes without a replica for a volume +// and two disks on nodes with a replica for the same volume. getDiskCandidates only returns the disks without a +// replica, even if the replica can legally be scheduled on all four disks. +// Some callers (e.g. 
CheckAndReuseFailedReplicas) do not consider a node or zone to be used if it contains a failed +// replica. ignoreFailedReplicas == true supports this use case. +func (rcs *ReplicaScheduler) getDiskCandidates(nodeInfo map[string]*longhorn.Node, + nodeDisksMap map[string]map[string]struct{}, + replicas map[string]*longhorn.Replica, + volume *longhorn.Volume, + requireSchedulingCheck, ignoreFailedReplicas bool) (map[string]*Disk, util.MultiError) { multiError := util.NewMultiError() nodeSoftAntiAffinity, err := rcs.ds.GetSettingAsBool(types.SettingNameReplicaSoftAntiAffinity) @@ -191,6 +207,17 @@ func (rcs *ReplicaScheduler) getDiskCandidates(nodeInfo map[string]*longhorn.Nod diskSoftAntiAffinity = volume.Spec.ReplicaDiskSoftAntiAffinity == longhorn.ReplicaDiskSoftAntiAffinityEnabled } + creatingNewReplicasForReplenishment := false + if volume.Status.Robustness == longhorn.VolumeRobustnessDegraded { + timeToReplacementReplica, _, err := rcs.timeToReplacementReplica(volume) + if err != nil { + err = errors.Wrap(err, "failed to get time until replica replacement") + multiError.Append(util.NewMultiError(err.Error())) + return map[string]*Disk{}, multiError + } + creatingNewReplicasForReplenishment = timeToReplacementReplica == 0 + } + getDiskCandidatesFromNodes := func(nodes map[string]*longhorn.Node) (diskCandidates map[string]*Disk, multiError util.MultiError) { diskCandidates = map[string]*Disk{} multiError = util.NewMultiError() @@ -206,7 +233,8 @@ func (rcs *ReplicaScheduler) getDiskCandidates(nodeInfo map[string]*longhorn.Nod return diskCandidates, multiError } - usedNodes, usedZones, onlyEvictingNodes, onlyEvictingZones := getCurrentNodesAndZones(replicas, nodeInfo) + usedNodes, usedZones, onlyEvictingNodes, onlyEvictingZones := getCurrentNodesAndZones(replicas, nodeInfo, + ignoreFailedReplicas, creatingNewReplicasForReplenishment) allowEmptyNodeSelectorVolume, err := rcs.ds.GetSettingAsBool(types.SettingNameAllowEmptyNodeSelectorVolume) if err != nil { @@ -523,7 +551,9 @@ func (rcs *ReplicaScheduler) CheckAndReuseFailedReplica(replicas map[string]*lon } } - diskCandidates, _ := rcs.getDiskCandidates(availableNodesInfo, availableNodeDisksMap, replicas, volume, false) + // Call getDiskCandidates with ignoreFailedReplicas == true since we want the list of candidates to include disks + // that already contain a failed replica. + diskCandidates, _ := rcs.getDiskCandidates(availableNodesInfo, availableNodeDisksMap, replicas, volume, false, true) var reusedReplica *longhorn.Replica for _, suggestDisk := range diskCandidates { @@ -565,7 +595,7 @@ func (rcs *ReplicaScheduler) RequireNewReplica(replicas map[string]*longhorn.Rep hasPotentiallyReusableReplica := false for _, r := range replicas { - if IsPotentiallyReusableReplica(r, hardNodeAffinity) { + if IsPotentiallyReusableReplica(r) { hasPotentiallyReusableReplica = true break } @@ -574,42 +604,25 @@ func (rcs *ReplicaScheduler) RequireNewReplica(replicas map[string]*longhorn.Rep return 0 } - // Otherwise Longhorn will relay the new replica creation then there is a chance to reuse failed replicas later. 
- settingValue, err := rcs.ds.GetSettingAsInt(types.SettingNameReplicaReplenishmentWaitInterval) + timeUntilNext, timeOfNext, err := rcs.timeToReplacementReplica(volume) if err != nil { - logrus.Errorf("Failed to get Setting ReplicaReplenishmentWaitInterval, will directly replenish a new replica: %v", err) - return 0 + msg := "Failed to get time until replica replacement, will directly replenish a new replica" + logrus.WithError(err).Errorf(msg) } - waitInterval := time.Duration(settingValue) * time.Second - lastDegradedAt, err := util.ParseTime(volume.Status.LastDegradedAt) - - if err != nil { - logrus.Errorf("Failed to get parse volume last degraded timestamp %v, will directly replenish a new replica: %v", volume.Status.LastDegradedAt, err) - return 0 - } - now := time.Now() - if now.After(lastDegradedAt.Add(waitInterval)) { - return 0 + if timeUntilNext > 0 { + // Adding another second to the checkBackDuration to avoid clock skew. + timeUntilNext = timeUntilNext + time.Second + logrus.Infof("Replica replenishment is delayed until %v", timeOfNext.Add(time.Second)) } - - logrus.Infof("Replica replenishment is delayed until %v", lastDegradedAt.Add(waitInterval)) - // Adding 1 more second to the check back interval to avoid clock skew - return lastDegradedAt.Add(waitInterval).Sub(now) + time.Second + return timeUntilNext } func (rcs *ReplicaScheduler) isFailedReplicaReusable(r *longhorn.Replica, v *longhorn.Volume, nodeInfo map[string]*longhorn.Node, hardNodeAffinity string) (bool, error) { - if r.Spec.FailedAt == "" { - return false, nil - } - if r.Spec.NodeID == "" || r.Spec.DiskID == "" { - return false, nil - } - if r.Spec.RebuildRetryCount >= FailedReplicaMaxRetryCount { - return false, nil - } - if r.Spec.EvictionRequested { + // All failedReusableReplicas are also potentiallyFailedReusableReplicas. + if !IsPotentiallyReusableReplica(r) { return false, nil } + if hardNodeAffinity != "" && r.Spec.NodeID != hardNodeAffinity { return false, nil } @@ -680,9 +693,9 @@ func (rcs *ReplicaScheduler) isFailedReplicaReusable(r *longhorn.Replica, v *lon return true, nil } -// IsPotentiallyReusableReplica is used to check if a failed replica is potentially reusable. -// A potentially reusable replica means this failed replica may be able to reuse it later but it’s not valid now due to node/disk down issue. -func IsPotentiallyReusableReplica(r *longhorn.Replica, hardNodeAffinity string) bool { +// IsPotentiallyReusableReplica checks if a failed replica is potentially reusable. A potentially reusable replica means +// this failed replica may be able to reuse it later but it’s not valid now due to node/disk down issue. +func IsPotentiallyReusableReplica(r *longhorn.Replica) bool { if r.Spec.FailedAt == "" { return false } @@ -695,9 +708,6 @@ func IsPotentiallyReusableReplica(r *longhorn.Replica, hardNodeAffinity string) if r.Spec.EvictionRequested { return false } - if hardNodeAffinity != "" && r.Spec.NodeID != hardNodeAffinity { - return false - } // TODO: Reuse failed replicas for a SPDK volume if datastore.IsDataEngineV2(r.Spec.DataEngine) { return false @@ -852,7 +862,14 @@ func findDiskSpecAndDiskStatusInNode(diskUUID string, node *longhorn.Node) (long return longhorn.DiskSpec{}, longhorn.DiskStatus{}, false } -func getCurrentNodesAndZones(replicas map[string]*longhorn.Replica, nodeInfo map[string]*longhorn.Node) (map[string]*longhorn.Node, +// getCurrentNodesAndZones returns the nodes and zones a replica is already scheduled to. 
+// - Some callers do not consider a node or zone to be used if it contains a failed replica. +// ignoreFailedReplicas == true supports this use case. +// - Otherwise, getCurrentNodesAndZones does not consider a node or zone to be occupied by a failed replica that can +// no longer be used or is likely actively being replaced. This makes nodes and zones with useless replicas +// available for scheduling. +func getCurrentNodesAndZones(replicas map[string]*longhorn.Replica, nodeInfo map[string]*longhorn.Node, + ignoreFailedReplicas, creatingNewReplicasForReplenishment bool) (map[string]*longhorn.Node, map[string]bool, map[string]bool, map[string]bool) { usedNodes := map[string]*longhorn.Node{} usedZones := map[string]bool{} @@ -860,31 +877,74 @@ func getCurrentNodesAndZones(replicas map[string]*longhorn.Replica, nodeInfo map onlyEvictingZones := map[string]bool{} for _, r := range replicas { - if r.Spec.NodeID != "" && r.DeletionTimestamp == nil && r.Spec.FailedAt == "" { - if node, ok := nodeInfo[r.Spec.NodeID]; ok { - if r.Spec.EvictionRequested { - if _, ok := usedNodes[r.Spec.NodeID]; !ok { - // This is an evicting replica on a thus far unused node. We won't change this again unless we - // find a non-evicting replica on this node. - onlyEvictingNodes[node.Name] = true - } - if used := usedZones[node.Status.Zone]; !used { - // This is an evicting replica in a thus far unused zone. We won't change this again unless we - // find a non-evicting replica in this zone. - onlyEvictingZones[node.Status.Zone] = true - } - } else { - // There is now at least one replica on this node and in this zone that is not evicting. - onlyEvictingNodes[node.Name] = false - onlyEvictingZones[node.Status.Zone] = false - } + if r.Spec.NodeID == "" { + continue + } + if r.DeletionTimestamp != nil { + continue + } + if r.Spec.FailedAt != "" { + if ignoreFailedReplicas { + continue + } + if !IsPotentiallyReusableReplica(r) { + continue // This replica can never be used again, so it does not count in scheduling decisions. + } + if creatingNewReplicasForReplenishment { + continue // Maybe this replica can be used again, but it is being actively replaced anyway. + } + } - usedNodes[node.Name] = node - // For empty zone label, we treat them as one zone. - usedZones[node.Status.Zone] = true + if node, ok := nodeInfo[r.Spec.NodeID]; ok { + if r.Spec.EvictionRequested { + if _, ok := usedNodes[r.Spec.NodeID]; !ok { + // This is an evicting replica on a thus far unused node. We won't change this again unless we + // find a non-evicting replica on this node. + onlyEvictingNodes[node.Name] = true + } + if used := usedZones[node.Status.Zone]; !used { + // This is an evicting replica in a thus far unused zone. We won't change this again unless we + // find a non-evicting replica in this zone. + onlyEvictingZones[node.Status.Zone] = true + } + } else { + // There is now at least one replica on this node and in this zone that is not evicting. + onlyEvictingNodes[node.Name] = false + onlyEvictingZones[node.Status.Zone] = false } + + usedNodes[node.Name] = node + // For empty zone label, we treat them as one zone. + usedZones[node.Status.Zone] = true } } return usedNodes, usedZones, onlyEvictingNodes, onlyEvictingZones } + +// timeToReplacementReplica returns the amount of time until Longhorn should create a new replica for a degraded volume, +// even if there are potentially reusable failed replicas. It returns 0 if replica-replenishment-wait-interval has +// elapsed and a new replica is needed right now. 
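The replenishment-delay arithmetic described above can be illustrated with a standalone sketch; the function below mirrors the computation in timeToReplacementReplica but takes the setting value and timestamps as plain arguments, so the names and values are illustrative only.

package main

import (
	"fmt"
	"time"
)

// timeToReplacement mirrors the delay computation: zero means a replacement replica is
// needed immediately; otherwise it returns how long to wait and until when.
func timeToReplacement(lastDegradedAt time.Time, waitSeconds int64, now time.Time) (time.Duration, time.Time) {
	timeOfNext := lastDegradedAt.Add(time.Duration(waitSeconds) * time.Second)
	if now.After(timeOfNext) {
		return 0, time.Time{}
	}
	return timeOfNext.Sub(now), timeOfNext
}

func main() {
	// Volume degraded two minutes ago with a 600s replica-replenishment-wait-interval:
	// roughly eight minutes remain before a brand-new replica should be created.
	delay, next := timeToReplacement(time.Now().Add(-2*time.Minute), 600, time.Now())
	fmt.Printf("wait %v (until %v) before replenishing a new replica\n", delay.Round(time.Second), next)
}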
+func (rcs *ReplicaScheduler) timeToReplacementReplica(volume *longhorn.Volume) (time.Duration, time.Time, error) { + settingValue, err := rcs.ds.GetSettingAsInt(types.SettingNameReplicaReplenishmentWaitInterval) + if err != nil { + err = errors.Wrapf(err, "failed to get setting ReplicaReplenishmentWaitInterval") + return 0, time.Time{}, err + } + waitInterval := time.Duration(settingValue) * time.Second + + lastDegradedAt, err := util.ParseTime(volume.Status.LastDegradedAt) + if err != nil { + err = errors.Wrapf(err, "failed to parse last degraded timestamp %v", volume.Status.LastDegradedAt) + return 0, time.Time{}, err + } + + now := rcs.nowHandler() + timeOfNext := lastDegradedAt.Add(waitInterval) + if now.After(timeOfNext) { + // A replacement replica is needed now. + return 0, time.Time{}, nil + } + + return timeOfNext.Sub(now), timeOfNext, nil +} diff --git a/vendor/github.com/longhorn/longhorn-manager/types/setting.go b/vendor/github.com/longhorn/longhorn-manager/types/setting.go index a831d9381e0..5e8cbc3923b 100644 --- a/vendor/github.com/longhorn/longhorn-manager/types/setting.go +++ b/vendor/github.com/longhorn/longhorn-manager/types/setting.go @@ -19,7 +19,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "github.com/longhorn/longhorn-manager/meta" - "github.com/longhorn/longhorn-manager/util" longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" ) @@ -41,6 +40,9 @@ const ( SettingTypeInt = SettingType("int") SettingTypeBool = SettingType("bool") SettingTypeDeprecated = SettingType("deprecated") + + ValueIntRangeMinimum = "minimum" + ValueIntRangeMaximum = "maximum" ) type SettingName string @@ -124,6 +126,8 @@ const ( SettingNameV2DataEngine = SettingName("v2-data-engine") SettingNameV2DataEngineHugepageLimit = SettingName("v2-data-engine-hugepage-limit") SettingNameV2DataEngineGuaranteedInstanceManagerCPU = SettingName("v2-data-engine-guaranteed-instance-manager-cpu") + SettingNameV2DataEngineLogLevel = SettingName("v2-data-engine-log-level") + SettingNameV2DataEngineLogFlags = SettingName("v2-data-engine-log-flags") ) var ( @@ -201,6 +205,8 @@ var ( SettingNameV2DataEngine, SettingNameV2DataEngineHugepageLimit, SettingNameV2DataEngineGuaranteedInstanceManagerCPU, + SettingNameV2DataEngineLogLevel, + SettingNameV2DataEngineLogFlags, SettingNameOfflineReplicaRebuilding, SettingNameReplicaDiskSoftAntiAffinity, SettingNameAllowEmptyNodeSelectorVolume, @@ -230,6 +236,8 @@ type SettingDefinition struct { ReadOnly bool `json:"readOnly"` Default string `json:"default"` Choices []string `json:"options,omitempty"` // +optional + // Use map to present minimum and maximum value instead of using int directly, so we can omitempy and distinguish 0 or nil at the same time. 
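A minimal sketch of how a value could be checked against such a ValueIntRange map; the real enforcement lives in ValidateSetting and may differ. Only bounds present in the map are applied, which is the reason for using a map rather than two plain ints.

package main

import (
	"fmt"
	"strconv"
)

const (
	ValueIntRangeMinimum = "minimum"
	ValueIntRangeMaximum = "maximum"
)

// validateIntRange enforces only the bounds that are present in the map: an absent key
// means "no bound" on that side.
func validateIntRange(value string, valueIntRange map[string]int) error {
	v, err := strconv.Atoi(value)
	if err != nil {
		return fmt.Errorf("value %v is not a number", value)
	}
	if min, ok := valueIntRange[ValueIntRangeMinimum]; ok && v < min {
		return fmt.Errorf("value %v is below the minimum %v", v, min)
	}
	if max, ok := valueIntRange[ValueIntRangeMaximum]; ok && v > max {
		return fmt.Errorf("value %v is above the maximum %v", v, max)
	}
	return nil
}

func main() {
	// e.g. storage-minimal-available-percentage allows 0-100
	bounds := map[string]int{ValueIntRangeMinimum: 0, ValueIntRangeMaximum: 100}
	fmt.Println(validateIntRange("25", bounds))  // <nil>
	fmt.Println(validateIntRange("250", bounds)) // above the maximum
}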
+	ValueIntRange map[string]int `json:"range,omitempty"` // +optional
 }
 
 var settingDefinitionsLock sync.RWMutex
@@ -309,6 +317,8 @@ var (
 	SettingNameV2DataEngine:                             SettingDefinitionV2DataEngine,
 	SettingNameV2DataEngineHugepageLimit:                SettingDefinitionV2DataEngineHugepageLimit,
 	SettingNameV2DataEngineGuaranteedInstanceManagerCPU: SettingDefinitionV2DataEngineGuaranteedInstanceManagerCPU,
+	SettingNameV2DataEngineLogLevel:                     SettingDefinitionV2DataEngineLogLevel,
+	SettingNameV2DataEngineLogFlags:                     SettingDefinitionV2DataEngineLogFlags,
 	SettingNameOfflineReplicaRebuilding:                 SettingDefinitionOfflineReplicaRebuilding,
 	SettingNameReplicaDiskSoftAntiAffinity:              SettingDefinitionReplicaDiskSoftAntiAffinity,
 	SettingNameAllowEmptyNodeSelectorVolume:             SettingDefinitionAllowEmptyNodeSelectorVolume,
@@ -354,6 +364,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "300",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionFailedBackupTTL = SettingDefinition{
@@ -367,6 +380,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "1440",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionRestoreVolumeRecurringJobs = SettingDefinition{
@@ -485,6 +501,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "100",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionStorageMinimalAvailablePercentage = SettingDefinition{
@@ -495,6 +514,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "25",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+			ValueIntRangeMaximum: 100,
+		},
 	}
 
 	SettingDefinitionStorageReservedPercentageForDefaultDisk = SettingDefinition{
@@ -505,6 +528,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "30",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+			ValueIntRangeMaximum: 100,
+		},
 	}
 
 	SettingDefinitionUpgradeChecker = SettingDefinition{
@@ -564,6 +591,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "3",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1,
+			ValueIntRangeMaximum: 20,
+		},
 	}
 
 	SettingDefinitionDefaultDataLocality = SettingDefinition{
@@ -777,6 +808,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "600",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionConcurrentReplicaRebuildPerNodeLimit = SettingDefinition{
@@ -792,6 +826,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "5",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionConcurrentVolumeBackupRestorePerNodeLimit = SettingDefinition{
@@ -804,6 +841,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "5",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionSystemManagedPodsImagePullPolicy = SettingDefinition{
@@ -862,6 +902,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "0",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionBackingImageCleanupWaitInterval = SettingDefinition{
@@ -872,6 +915,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "60",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionBackingImageRecoveryWaitInterval = SettingDefinition{
@@ -885,6 +931,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "300",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionGuaranteedInstanceManagerCPU = SettingDefinition{
@@ -905,6 +954,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "12",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+			ValueIntRangeMaximum: 40,
+		},
 	}
 
 	SettingDefinitionKubernetesClusterAutoscalerEnabled = SettingDefinition{
@@ -956,6 +1009,9 @@ var (
 		Required: false,
 		ReadOnly: false,
 		Default:  "1",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionRecurringFailedJobsHistoryLimit = SettingDefinition{
@@ -967,6 +1023,9 @@ var (
 		Required: false,
 		ReadOnly: false,
 		Default:  "1",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionRecurringJobMaxRetention = SettingDefinition{
@@ -977,6 +1036,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "100",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1,
+			ValueIntRangeMaximum: MaxSnapshotNum,
+		},
 	}
 
 	SettingDefinitionSupportBundleFailedHistoryLimit = SettingDefinition{
@@ -989,6 +1052,9 @@ var (
 		Required: false,
 		ReadOnly: false,
 		Default:  "1",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionDeletingConfirmationFlag = SettingDefinition{
@@ -1011,6 +1077,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "8",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 8,
+			ValueIntRangeMaximum: 30,
+		},
 	}
 
 	SettingDefinitionSnapshotDataIntegrity = SettingDefinition{
@@ -1093,6 +1163,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "30",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 5,
+			ValueIntRangeMaximum: 120,
+		},
 	}
 
 	SettingDefinitionBackupCompressionMethod = SettingDefinition{
@@ -1122,6 +1196,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "2",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1,
+		},
 	}
 
 	SettingDefinitionRestoreConcurrentLimit = SettingDefinition{
@@ -1132,6 +1209,9 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "2",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1,
+		},
 	}
 
 	SettingDefinitionLogLevel = SettingDefinition{
@@ -1142,6 +1222,7 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "Info",
+		Choices:  []string{"Panic", "Fatal", "Error", "Warn", "Info", "Debug", "Trace"},
 	}
 
 	SettingDefinitionOfflineReplicaRebuilding = SettingDefinition{
@@ -1189,6 +1270,9 @@ var (
 		Required: true,
 		ReadOnly: true,
 		Default:  "2048",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 0,
+		},
 	}
 
 	SettingDefinitionV2DataEngineGuaranteedInstanceManagerCPU = SettingDefinition{
@@ -1203,6 +1287,10 @@ var (
 		Required: true,
 		ReadOnly: false,
 		Default:  "1250",
+		ValueIntRange: map[string]int{
+			ValueIntRangeMinimum: 1000,
+			ValueIntRangeMaximum: 8000,
+		},
 	}
 
 	SettingDefinitionReplicaDiskSoftAntiAffinity = SettingDefinition{
@@ -1244,6 +1332,26 @@ var (
 		ReadOnly: false,
 		Default:  "false",
 	}
+
+	SettingDefinitionV2DataEngineLogLevel = SettingDefinition{
+		DisplayName: "V2 Data Engine Log Level",
+		Description: "The log level used in SPDK target daemon (spdk_tgt) of V2 Data Engine. Supported values are: Disabled, Error, Warn, Notice, Info and Debug. By default Notice.",
+		Category:    SettingCategoryV2DataEngine,
+		Type:        SettingTypeString,
+		Required:    true,
+		ReadOnly:    false,
+		Default:     "Notice",
+	}
+
+	SettingDefinitionV2DataEngineLogFlags = SettingDefinition{
+		DisplayName: "V2 Data Engine Log Flags",
+		Description: "The log flags used in SPDK target daemon (spdk_tgt) of V2 Data Engine.",
+		Category:    SettingCategoryV2DataEngine,
+		Type:        SettingTypeString,
+		Required:    false,
+		ReadOnly:    false,
+		Default:     "",
+	}
 )
 
 type NodeDownPodDeletionPolicy string
@@ -1299,252 +1407,18 @@ func ValidateSetting(name, value string) (err error) {
 		return fmt.Errorf("required setting %v shouldn't be empty", sName)
 	}
 
-	switch sName {
-	case SettingNameBackupTarget:
-		u, err := url.Parse(value)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse %v as url", value)
-		}
-
-		// Check whether have $ or , have been set in BackupTarget path
-		regStr := `[\$\,]`
-		if u.Scheme == "cifs" {
-			// The $ in SMB/CIFS URIs means that the share is hidden.
-			regStr = `[\,]`
-		}
-
-		reg := regexp.MustCompile(regStr)
-		findStr := reg.FindAllString(u.Path, -1)
-		if len(findStr) != 0 {
-			return fmt.Errorf("value %s, contains %v", value, strings.Join(findStr, " or "))
-		}
-
-	// boolean
-	case SettingNameCreateDefaultDiskLabeledNodes:
-		fallthrough
-	case SettingNameAllowRecurringJobWhileVolumeDetached:
-		fallthrough
-	case SettingNameReplicaSoftAntiAffinity:
-		fallthrough
-	case SettingNameDisableSchedulingOnCordonedNode:
-		fallthrough
-	case SettingNameReplicaZoneSoftAntiAffinity:
-		fallthrough
-	case SettingNameAllowVolumeCreationWithDegradedAvailability:
-		fallthrough
-	case SettingNameAutoCleanupSystemGeneratedSnapshot:
-		fallthrough
-	case SettingNameAutoCleanupRecurringJobBackupSnapshot:
-		fallthrough
-	case SettingNameAutoDeletePodWhenVolumeDetachedUnexpectedly:
-		fallthrough
-	case SettingNameKubernetesClusterAutoscalerEnabled:
-		fallthrough
-	case SettingNameOrphanAutoDeletion:
-		fallthrough
-	case SettingNameDeletingConfirmationFlag:
-		fallthrough
-	case SettingNameRestoreVolumeRecurringJobs:
-		fallthrough
-	case SettingNameRemoveSnapshotsDuringFilesystemTrim:
-		fallthrough
-	case SettingNameFastReplicaRebuildEnabled:
-		fallthrough
-	case SettingNameUpgradeChecker:
-		fallthrough
-	case SettingNameV1DataEngine:
-		fallthrough
-	case SettingNameV2DataEngine:
-		fallthrough
-	case SettingNameAllowEmptyNodeSelectorVolume:
-		fallthrough
-	case SettingNameAllowEmptyDiskSelectorVolume:
-		fallthrough
-	case SettingNameAllowCollectingLonghornUsage:
-		fallthrough
-	case SettingNameDetachManuallyAttachedVolumesWhenCordoned:
-		fallthrough
-	case SettingNameReplicaDiskSoftAntiAffinity:
-		fallthrough
-	case SettingNameDisableSnapshotPurge:
-		if value != "true" && value != "false" {
-			return fmt.Errorf("value %v of setting %v should be true or false", value, sName)
-		}
-
-	case SettingNameStorageOverProvisioningPercentage:
-		if _, err := strconv.Atoi(value); err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-		// additional check whether over provisioning percentage is positive
-		value, err := util.ConvertSize(value)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse %v as size", value)
-		}
-		if value < 0 {
-			return fmt.Errorf("value %v should be positive", value)
-		}
-	case SettingNameStorageReservedPercentageForDefaultDisk:
-		fallthrough
-	case SettingNameStorageMinimalAvailablePercentage:
-		if _, err := strconv.Atoi(value); err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-		// additional check whether minimal available percentage is between 0 to 100
-		value, err := util.ConvertSize(value)
-		if err != nil {
-			return errors.Wrapf(err, "failed to parse %v as size", value)
-		}
-		if value < 0 || value > 100 {
-			return fmt.Errorf("value %v should between 0 to 100", value)
-		}
-	case SettingNameDefaultReplicaCount:
-		c, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "value %v is not number", value)
-		}
-		if err := ValidateReplicaCount(c); err != nil {
-			return errors.Wrapf(err, "failed to validate replica count %v", c)
-		}
-	case SettingNameReplicaAutoBalance:
-		if err := ValidateReplicaAutoBalance(longhorn.ReplicaAutoBalance(value)); err != nil {
-			return errors.Wrapf(err, "failed to validate replica auto balance: %v", value)
-		}
-	case SettingNameBackingImageCleanupWaitInterval:
-		fallthrough
-	case SettingNameBackingImageRecoveryWaitInterval:
-		fallthrough
-	case SettingNameReplicaReplenishmentWaitInterval:
-		fallthrough
-	case SettingNameConcurrentReplicaRebuildPerNodeLimit:
-		fallthrough
-	case SettingNameConcurrentBackupRestorePerNodeLimit:
-		fallthrough
-	case SettingNameConcurrentAutomaticEngineUpgradePerNodeLimit:
-		fallthrough
-	case SettingNameSupportBundleFailedHistoryLimit:
-		fallthrough
-	case SettingNameBackupstorePollInterval:
-		fallthrough
-	case SettingNameRecurringSuccessfulJobsHistoryLimit:
-		fallthrough
-	case SettingNameRecurringFailedJobsHistoryLimit:
-		fallthrough
-	case SettingNameFailedBackupTTL:
-		fallthrough
-	case SettingNameV2DataEngineHugepageLimit:
-		value, err := strconv.Atoi(value)
-		if err != nil {
-			errors.Wrapf(err, "value %v is not a number", value)
-		}
-		if value < 0 {
-			return fmt.Errorf("the value %v shouldn't be less than 0", value)
-		}
-	case SettingNameTaintToleration:
-		if _, err = UnmarshalTolerations(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameSystemManagedComponentsNodeSelector:
-		if _, err = UnmarshalNodeSelector(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameStorageNetwork:
-		if err = ValidateStorageNetwork(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameReplicaFileSyncHTTPClientTimeout:
-		timeout, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-
-		if timeout < 5 || timeout > 120 {
-			return fmt.Errorf("the value %v should be between 5 and 120", value)
-		}
-	case SettingNameRecurringJobMaxRetention:
-		maxNumber, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-		if maxNumber < 1 || maxNumber > MaxSnapshotNum {
-			return fmt.Errorf("the value %v should be between 1 and %v", maxNumber, MaxSnapshotNum)
-		}
-	case SettingNameEngineReplicaTimeout:
-		timeout, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-
-		if timeout < 8 || timeout > 30 {
-			return fmt.Errorf("the value %v should be between 8 and 30", value)
-		}
-	case SettingNameBackupConcurrentLimit:
-		fallthrough
-	case SettingNameRestoreConcurrentLimit:
-		val, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "value %v is not a number", value)
-		}
-
-		if val < 1 {
-			return fmt.Errorf("the value %v shouldn't be less than 1", value)
-		}
-	case SettingNameOfflineReplicaRebuilding:
-		if err = ValidateOfflineReplicaRebuilding(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameSnapshotDataIntegrity:
-		if err = ValidateSnapshotDataIntegrity(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameBackupCompressionMethod:
-		if err = ValidateBackupCompressionMethod(value); err != nil {
-			return errors.Wrapf(err, "the value of %v is invalid", sName)
-		}
-	case SettingNameSnapshotDataIntegrityCronJob:
-		schedule, err := cron.ParseStandard(value)
-		if err != nil {
-			return errors.Wrapf(err, "invalid cron job format: %v", value)
-		}
-
-		runAt := schedule.Next(time.Unix(0, 0))
-		nextRunAt := schedule.Next(runAt)
+	if err := validateBool(definition, value); err != nil {
+		return errors.Wrapf(err, "failed to validate the setting %v", sName)
+	}
 
-		logrus.Infof("The interval between two data integrity checks is %v seconds", nextRunAt.Sub(runAt).Seconds())
+	if err := validateInt(definition, value); err != nil {
+		return errors.Wrapf(err, "failed to validate the setting %v", sName)
+	}
 
-	// multi-choices
-	case SettingNameNodeDownPodDeletionPolicy:
-		fallthrough
-	case SettingNameDefaultDataLocality:
-		fallthrough
-	case SettingNameNodeDrainPolicy:
-		fallthrough
-	case SettingNameSystemManagedPodsImagePullPolicy:
-		definition, _ := GetSettingDefinition(sName)
-		choices := definition.Choices
-		if !isValidChoice(choices, value) {
-			return fmt.Errorf("value %v is not a valid choice, available choices %v", value, choices)
-		}
-	case SettingNameGuaranteedInstanceManagerCPU:
-		i, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "guaranteed v1 data engine instance manager cpu value %v is not a valid integer", value)
-		}
-		if i < 0 || i > 40 {
-			return fmt.Errorf("guaranteed v1 data engine instance manager cpu value %v should be between 0 to 40", value)
-		}
-	case SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
-		i, err := strconv.Atoi(value)
-		if err != nil {
-			return errors.Wrapf(err, "guaranteed v2 data engine instance manager cpu value %v is not a valid integer", value)
-		}
-		if i < 1000 || i > 8000 {
-			return fmt.Errorf("guaranteed v2 data engine instance manager cpu value %v should be between 1000 to 8000", value)
-		}
-	case SettingNameLogLevel:
-		if err := ValidateLogLevel(value); err != nil {
-			return errors.Wrapf(err, "failed to validate log level %v", value)
-		}
+	if err := validateString(sName, definition, value); err != nil {
+		return errors.Wrapf(err, "failed to validate the setting %v", sName)
 	}
+
 	return nil
 }
@@ -1734,3 +1608,110 @@ func GetDangerZoneSettings() sets.Set[SettingName] {
 	return settingList
 }
+
+func validateBool(definition SettingDefinition, value string) error {
+	if definition.Type != SettingTypeBool {
+		return nil
+	}
+
+	if value != "true" && value != "false" {
+		return fmt.Errorf("value %v should be true or false", value)
+	}
+	return nil
+}
+
+func validateInt(definition SettingDefinition, value string) error {
+	if definition.Type != SettingTypeInt {
+		return nil
+	}
+
+	intValue, err := strconv.Atoi(value)
+	if err != nil {
+		return errors.Wrapf(err, "value %v is not a number", value)
+	}
+
+	valueIntRange := definition.ValueIntRange
+	if minValue, exists := valueIntRange[ValueIntRangeMinimum]; exists {
+		if intValue < minValue {
+			return fmt.Errorf("value %v should be larger than %v", intValue, minValue)
+		}
+	}
+
+	if maxValue, exists := valueIntRange[ValueIntRangeMaximum]; exists {
+		if intValue > maxValue {
+			return fmt.Errorf("value %v should be less than %v", intValue, maxValue)
+		}
+	}
+	return nil
+}
+
+func validateString(sName SettingName, definition SettingDefinition, value string) error {
+	if definition.Type != SettingTypeString {
+		return nil
+	}
+
+	// multi-choices
+	if definition.Choices != nil && len(definition.Choices) > 0 {
+		if !isValidChoice(definition.Choices, value) {
+			return fmt.Errorf("value %v is not a valid choice, available choices %v", value, definition.Choices)
+		}
+		return nil
+	}
+
+	switch sName {
+	case SettingNameSnapshotDataIntegrityCronJob:
+		schedule, err := cron.ParseStandard(value)
+		if err != nil {
+			return errors.Wrapf(err, "invalid cron job format: %v", value)
+		}
+
+		runAt := schedule.Next(time.Unix(0, 0))
+		nextRunAt := schedule.Next(runAt)
+
+		logrus.Infof("The interval between two data integrity checks is %v seconds", nextRunAt.Sub(runAt).Seconds())
+
+	case SettingNameTaintToleration:
+		if _, err := UnmarshalTolerations(value); err != nil {
+			return errors.Wrapf(err, "the value of %v is invalid", sName)
+		}
+	case SettingNameSystemManagedComponentsNodeSelector:
+		if _, err := UnmarshalNodeSelector(value); err != nil {
+			return errors.Wrapf(err, "the value of %v is invalid", sName)
+		}
+
+	case SettingNameBackupTarget:
+		u, err := url.Parse(value)
+		if err != nil {
+			return errors.Wrapf(err, "failed to parse %v as url", value)
+		}
+
+		// Check whether have $ or , have been set in BackupTarget path
+		regStr := `[\$\,]`
+		if u.Scheme == "cifs" {
+			// The $ in SMB/CIFS URIs means that the share is hidden.
+			regStr = `[\,]`
+		}
+
+		reg := regexp.MustCompile(regStr)
+		findStr := reg.FindAllString(u.Path, -1)
+		if len(findStr) != 0 {
+			return fmt.Errorf("value %s, contains %v", value, strings.Join(findStr, " or "))
+		}
+
+	case SettingNameStorageNetwork:
+		if err := ValidateStorageNetwork(value); err != nil {
+			return errors.Wrapf(err, "the value of %v is invalid", sName)
+		}
+
+	case SettingNameV2DataEngineLogLevel:
+		if err := ValidateV2DataEngineLogLevel(value); err != nil {
+			return errors.Wrapf(err, "failed to validate v2 data engine log level %v", value)
+		}
+	case SettingNameV2DataEngineLogFlags:
+		if err := ValidateV2DataEngineLogFlags(value); err != nil {
+			return errors.Wrapf(err, "failed to validate v2 data engine log flags %v", value)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/longhorn/longhorn-manager/types/types.go b/vendor/github.com/longhorn/longhorn-manager/types/types.go
index e703404dd83..527e94f4f33 100644
--- a/vendor/github.com/longhorn/longhorn-manager/types/types.go
+++ b/vendor/github.com/longhorn/longhorn-manager/types/types.go
@@ -725,16 +725,37 @@ func ErrorIsInvalidState(err error) bool {
 }
 
 func ValidateReplicaCount(count int) error {
-	if count < 1 || count > 20 {
-		return fmt.Errorf("replica count value must between 1 to 20")
+
+	definition, _ := GetSettingDefinition(SettingNameDefaultReplicaCount)
+	valueIntRange := definition.ValueIntRange
+
+	if count < valueIntRange[ValueIntRangeMinimum] || count > valueIntRange[ValueIntRangeMaximum] {
+		return fmt.Errorf("replica count value %v must between %v to %v", count, valueIntRange[ValueIntRangeMinimum], valueIntRange[ValueIntRangeMaximum])
 	}
 	return nil
 }
 
-func ValidateLogLevel(level string) error {
-	if _, err := logrus.ParseLevel(level); err != nil {
-		return fmt.Errorf("log level is invalid")
+func ValidateV2DataEngineLogLevel(level string) error {
+	switch strings.ToLower(level) {
+	case "disabled", "error", "warn", "notice", "info", "debug":
+		return nil
+	default:
+		return fmt.Errorf("log level %s is invalid", level)
+	}
+}
+
+func ValidateV2DataEngineLogFlags(flags string) error {
+	if flags == "" {
+		return nil
 	}
+
+	pattern := "^[a-zA-Z,]+$"
+	reg := regexp.MustCompile(pattern)
+
+	if !reg.MatchString(flags) {
+		return fmt.Errorf("log flags %s is invalid", flags)
+	}
+
 	return nil
 }
@@ -1014,18 +1035,15 @@ func ValidateCPUReservationValues(settingName SettingName, instanceManagerCPUStr
 		return errors.Wrapf(err, "invalid guaranteed/requested instance manager CPU value (%v)", instanceManagerCPUStr)
 	}
 
+	definition, _ := GetSettingDefinition(settingName)
+	valueIntRange := definition.ValueIntRange
+
 	switch settingName {
-	case SettingNameGuaranteedInstanceManagerCPU:
-		isUnderLimit := instanceManagerCPU < 0
-		isOverLimit := instanceManagerCPU > 40
-		if isUnderLimit || isOverLimit {
-			return fmt.Errorf("invalid requested v1 data engine instance manager CPUs. Valid instance manager CPU range between 0%% - 40%%")
-		}
-	case SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
-		isUnderLimit := instanceManagerCPU < 1000
-		isOverLimit := instanceManagerCPU > 8000
+	case SettingNameGuaranteedInstanceManagerCPU, SettingNameV2DataEngineGuaranteedInstanceManagerCPU:
+		isUnderLimit := instanceManagerCPU < valueIntRange[ValueIntRangeMinimum]
+		isOverLimit := instanceManagerCPU > valueIntRange[ValueIntRangeMaximum]
 		if isUnderLimit || isOverLimit {
-			return fmt.Errorf("invalid requested v2 data engine instance manager CPUs. Valid instance manager CPU range between 1000 - 8000 millicpu")
+			return fmt.Errorf("invalid requested instance manager CPUs. Valid instance manager CPU range between %v - %v millicpu", valueIntRange[ValueIntRangeMinimum], valueIntRange[ValueIntRangeMaximum])
 		}
 	}
 	return nil
diff --git a/vendor/github.com/longhorn/longhorn-manager/util/util.go b/vendor/github.com/longhorn/longhorn-manager/util/util.go
index 8eff1386a86..e6612e242c6 100644
--- a/vendor/github.com/longhorn/longhorn-manager/util/util.go
+++ b/vendor/github.com/longhorn/longhorn-manager/util/util.go
@@ -261,6 +261,20 @@ func TimestampWithinLimit(latest time.Time, ts string, limit time.Duration) bool
 	return deadline.After(latest)
 }
 
+// TimestampAfterTimestamp returns true if timestamp1 is after timestamp2. It returns false otherwise and an error if
+// either timestamp cannot be parsed.
+func TimestampAfterTimestamp(timestamp1 string, timestamp2 string) (bool, error) {
+	time1, err := time.Parse(time.RFC3339, timestamp1)
+	if err != nil {
+		return false, errors.Wrapf(err, "cannot parse timestamp %v", timestamp1)
+	}
+	time2, err := time.Parse(time.RFC3339, timestamp2)
+	if err != nil {
+		return false, errors.Wrapf(err, "cannot parse timestamp %v", timestamp2)
+	}
+	return time1.After(time2), nil
+}
+
 func ValidateString(name string) bool {
 	validName := regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
 	return validName.MatchString(name)
@@ -720,7 +734,7 @@ func TrimFilesystem(volumeName string, encryptedDevice bool) error {
 		return err
 	}
 
-	_, err = nsexec.Execute(lhtypes.BinaryFstrim, []string{validMountpoint}, lhtypes.ExecuteDefaultTimeout)
+	_, err = nsexec.Execute(nil, lhtypes.BinaryFstrim, []string{validMountpoint}, lhtypes.ExecuteDefaultTimeout)
 	if err != nil {
 		return errors.Wrapf(err, "cannot find volume %v mount info on host", volumeName)
 	}
diff --git a/vendor/github.com/longhorn/longhorn-manager/util/volume.go b/vendor/github.com/longhorn/longhorn-manager/util/volume.go
new file mode 100644
index 00000000000..2987bc90f0e
--- /dev/null
+++ b/vendor/github.com/longhorn/longhorn-manager/util/volume.go
@@ -0,0 +1,12 @@
+package util
+
+// Use util/volume.go for volume related utility functions that would otherwise belong in controller/utils.go except
+// that they are used by other components as well (e.g. manager).
+
+import (
+	longhorn "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"
+)
+
+func IsVolumeMigrating(v *longhorn.Volume) bool {
+	return v.Spec.MigrationNodeID != "" || v.Status.CurrentMigrationNodeID != ""
+}
diff --git a/vendor/github.com/longhorn/longhorn-share-manager/pkg/rpc/server.go b/vendor/github.com/longhorn/longhorn-share-manager/pkg/rpc/server.go
index 7bb69db786c..c7b28343c99 100644
--- a/vendor/github.com/longhorn/longhorn-share-manager/pkg/rpc/server.go
+++ b/vendor/github.com/longhorn/longhorn-share-manager/pkg/rpc/server.go
@@ -7,7 +7,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/protobuf/ptypes/empty"
 	"github.com/google/fscrypt/filesystem"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -20,7 +19,7 @@ import (
 	grpcstatus "google.golang.org/grpc/status"
 	"google.golang.org/protobuf/types/known/emptypb"
 
-	lhns "github.com/longhorn/go-common-libs/ns"
+	lhexec "github.com/longhorn/go-common-libs/exec"
 	lhtypes "github.com/longhorn/go-common-libs/types"
 
 	"github.com/longhorn/longhorn-share-manager/pkg/server"
@@ -105,13 +104,8 @@ func (s *ShareManagerServer) FilesystemTrim(ctx context.Context, req *Filesystem
 		return &emptypb.Empty{}, grpcstatus.Error(grpccodes.Internal, err.Error())
 	}
 
-	namespaces := []lhtypes.Namespace{lhtypes.NamespaceMnt, lhtypes.NamespaceNet}
-	nsexec, err := lhns.NewNamespaceExecutor(lhns.GetDefaultProcessName(), lhtypes.HostProcDirectory, namespaces)
-	if err != nil {
-		return &empty.Empty{}, grpcstatus.Error(grpccodes.Internal, err.Error())
-	}
-
-	_, err = nsexec.Execute(lhtypes.BinaryFstrim, []string{mountPath}, lhtypes.ExecuteDefaultTimeout)
+	execute := lhexec.NewExecutor().Execute
+	_, err = execute([]string{}, lhtypes.BinaryFstrim, []string{mountPath}, lhtypes.ExecuteDefaultTimeout)
 	if err != nil {
 		return &emptypb.Empty{}, grpcstatus.Error(grpccodes.Internal, err.Error())
 	}
diff --git a/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.pb.go b/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.pb.go
index d7dd87df62a..43df8ff3972 100644
---
a/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.pb.go +++ b/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.pb.go @@ -86,6 +86,7 @@ type Lvol struct { Parent string `protobuf:"bytes,5,opt,name=parent,proto3" json:"parent,omitempty"` Children map[string]bool `protobuf:"bytes,6,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` CreationTime string `protobuf:"bytes,7,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + UserCreated bool `protobuf:"varint,8,opt,name=user_created,json=userCreated,proto3" json:"user_created,omitempty"` } func (x *Lvol) Reset() { @@ -169,6 +170,13 @@ func (x *Lvol) GetCreationTime() string { return "" } +func (x *Lvol) GetUserCreated() bool { + if x != nil { + return x.UserCreated + } + return false +} + type Replica struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1602,6 +1610,7 @@ type SnapshotRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` SnapshotName string `protobuf:"bytes,2,opt,name=snapshot_name,json=snapshotName,proto3" json:"snapshot_name,omitempty"` + UserCreated bool `protobuf:"varint,3,opt,name=user_created,json=userCreated,proto3" json:"user_created,omitempty"` } func (x *SnapshotRequest) Reset() { @@ -1650,6 +1659,13 @@ func (x *SnapshotRequest) GetSnapshotName() string { return "" } +func (x *SnapshotRequest) GetUserCreated() bool { + if x != nil { + return x.UserCreated + } + return false +} + type SnapshotResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3023,6 +3039,194 @@ func (x *DiskDeleteRequest) GetDiskUuid() string { return "" } +type LogSetLevelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level,omitempty"` +} + +func (x *LogSetLevelRequest) Reset() { + *x = LogSetLevelRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogSetLevelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogSetLevelRequest) ProtoMessage() {} + +func (x *LogSetLevelRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogSetLevelRequest.ProtoReflect.Descriptor instead. 
+func (*LogSetLevelRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDescGZIP(), []int{43} +} + +func (x *LogSetLevelRequest) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +type LogSetFlagsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Flags string `protobuf:"bytes,1,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *LogSetFlagsRequest) Reset() { + *x = LogSetFlagsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogSetFlagsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogSetFlagsRequest) ProtoMessage() {} + +func (x *LogSetFlagsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogSetFlagsRequest.ProtoReflect.Descriptor instead. +func (*LogSetFlagsRequest) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDescGZIP(), []int{44} +} + +func (x *LogSetFlagsRequest) GetFlags() string { + if x != nil { + return x.Flags + } + return "" +} + +type LogGetLevelResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level,omitempty"` +} + +func (x *LogGetLevelResponse) Reset() { + *x = LogGetLevelResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetLevelResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetLevelResponse) ProtoMessage() {} + +func (x *LogGetLevelResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetLevelResponse.ProtoReflect.Descriptor instead. 
+func (*LogGetLevelResponse) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDescGZIP(), []int{45} +} + +func (x *LogGetLevelResponse) GetLevel() string { + if x != nil { + return x.Level + } + return "" +} + +type LogGetFlagsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Flags string `protobuf:"bytes,1,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *LogGetFlagsResponse) Reset() { + *x = LogGetFlagsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogGetFlagsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogGetFlagsResponse) ProtoMessage() {} + +func (x *LogGetFlagsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogGetFlagsResponse.ProtoReflect.Descriptor instead. +func (*LogGetFlagsResponse) Descriptor() ([]byte, []int) { + return file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDescGZIP(), []int{46} +} + +func (x *LogGetFlagsResponse) GetFlags() string { + if x != nil { + return x.Flags + } + return "" +} + var File_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto protoreflect.FileDescriptor var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDesc = []byte{ @@ -3032,7 +3236,7 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDe 0x2f, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x70, 0x64, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9f, 0x02, 0x0a, 0x04, 0x4c, 0x76, + 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, 0x02, 0x0a, 0x04, 0x4c, 0x76, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, @@ -3046,700 +3250,733 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDe 0x6c, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, - 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x3a, 0x02, 0x38, 0x01, 0x22, 0xdd, 0x03, 0x0a, 0x07, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdd, + 0x03, 0x0a, 0x07, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x6c, 0x76, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6c, 0x76, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x76, 0x73, + 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x76, 0x73, + 0x55, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x07, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x04, + 0x68, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, + 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, + 0x3d, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1e, + 0x0a, 0x0a, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, + 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, + 0x67, 0x1a, 0x4b, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, + 0x76, 0x6f, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, + 0x01, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x76, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x76, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x76, 0x73, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x76, 0x73, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, - 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x04, 0x68, 0x65, 0x61, - 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, - 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x3d, 0x0a, 0x09, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x72, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0a, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x4b, - 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x14, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x76, 0x73, 0x5f, - 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x76, 0x73, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x76, 0x73, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x76, 0x73, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, - 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6c, 0x65, 0x61, 0x6e, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x27, 0x0a, 0x11, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, - 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x08, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, - 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x1a, 0x4d, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x9f, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, - 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x76, 0x6f, 0x6c, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x73, 0x74, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x76, 0x6f, 0x6c, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x22, 0x61, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x46, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, - 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x73, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x64, - 0x73, 0x74, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x76, - 0x6f, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x18, 0x64, 0x73, 0x74, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x4c, - 0x76, 0x6f, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x61, 0x0a, 0x21, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, - 0x72, 0x63, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, - 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5c, 0x0a, - 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x53, 0x68, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, + 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6c, + 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x27, 0x0a, + 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5f, 0x0a, 0x20, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, - 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x78, - 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x3d, 0x0a, 0x21, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x64, 0x0a, 0x21, 0x52, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, + 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x1a, 0x4d, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9f, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x73, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x64, 0x73, 0x74, 0x5f, + 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6c, 0x76, 0x6f, 0x6c, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x18, 0x64, + 0x73, 0x74, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x76, 0x6f, 0x6c, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x61, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x46, + 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x28, 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x73, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, - 0x44, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x53, 0x72, 0x63, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x6e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x64, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3d, + 0x0a, 0x1b, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x76, 0x6f, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x73, 0x74, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, + 0x6e, 0x67, 0x4c, 0x76, 0x6f, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x61, 0x0a, + 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, + 0x6e, 0x67, 0x53, 0x72, 0x63, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x64, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x5c, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x53, 0x68, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x5f, + 0x0a, 0x20, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x5f, 
0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x75, 0x6e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x22, 0xfe, 0x05, 0x0a, 0x06, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x12, 0x4d, 0x0a, 0x10, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, - 0x64, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x70, 0x12, 0x21, 0x0a, 0x04, 0x68, 0x65, - 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x3c, 0x0a, - 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x44, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x41, 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x13, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, + 0x0e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, + 0x3d, 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x64, + 0x0a, 0x21, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x6e, 0x65, 0x78, 0x70, + 0x6f, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x75, 0x6e, 0x65, 0x78, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x22, 0xfe, 0x05, 0x0a, 0x06, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x12, 0x4d, + 0x0a, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x6d, + 0x61, 0x70, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, + 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 
0x65, 0x4d, 0x61, 0x70, 0x12, 0x21, 0x0a, + 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, + 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, + 0x12, 0x3c, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x73, 0x67, 0x1a, 0x44, 0x0a, 0x16, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xcd, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x63, 0x0a, 0x13, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, - 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x70, 0x64, 0x6b, - 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 
0x73, 0x4d, 0x61, - 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0x44, 0x0a, 0x16, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x57, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x29, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x26, 0x0a, - 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4c, 0x69, - 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, - 0x1a, 0x4b, 0x0a, 0x0c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x86, 0x01, - 0x0a, 0x17, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, - 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, - 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3b, 0x0a, 0x18, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x52, 
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x19, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x4c, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x1a, - 0x4d, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x89, - 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x4a, 0x0a, 0x0f, 0x53, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x73, 0x70, + 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x76, 0x6f, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcd, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 
0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x10, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, - 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, - 0x87, 0x03, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x67, - 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x41, 0x50, - 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x4d, - 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, - 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, 0x4d, 0x69, - 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x64, 0x61, 0x74, 0x61, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x64, 0x61, 0x74, 0x61, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x14, 0x64, 0x61, 0x74, 0x61, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4d, - 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x15, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x05, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, - 0x75, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4c, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, - 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, - 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, - 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 
0x69, 0x61, 0x6c, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x70, 0x65, 0x63, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x63, 0x0a, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, + 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x4d, 0x61, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, + 0x44, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x29, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x26, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x73, 0x1a, 0x4b, 0x0a, 0x0c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x86, 0x01, 0x0a, 0x17, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 
0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3b, 0x0a, 0x18, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x19, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x1a, 0x4d, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x89, 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x6d, 0x0a, + 0x0f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x63, 0x72, 0x65, 
0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x75, 0x73, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x37, 0x0a, 0x10, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x87, 0x03, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x44, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x63, + 0x6c, 0x69, 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x32, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x41, 0x50, 0x49, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x41, 0x50, 0x49, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, + 0x11, 0x64, 0x61, 0x74, 0x61, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x64, + 0x61, 0x74, 0x61, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x64, 0x61, 0x74, 0x61, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x4d, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, + 0x49, 0x0a, 0x15, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x64, 0x6b, + 0x72, 0x70, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x05, 0x0a, 0x13, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 
0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4c, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x62, + 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x62, 0x61, 0x63, + 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x69, + 0x6e, 0x67, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, + 0x1f, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 
0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7e, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x69, 0x6e, + 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x69, 0x73, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x77, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x22, 0xcb, 0x01, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x55, 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xc0, + 0x02, 0x0a, 0x1a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 
0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x7e, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x73, 0x49, 0x6e, 0x63, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x22, 0x77, 0x0a, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xcb, 0x01, 0x0a, 0x14, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 
0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0xc0, 0x02, 0x0a, 0x1a, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x53, 0x0a, 0x0a, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x3d, 0x0a, 0x0f, - 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa2, 0x01, 0x0a, 0x1b, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x06, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x01, 0x22, 0xa2, 0x01, 0x0a, 0x1b, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x43, 0x0a, 0x20, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc4, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x55, 0x72, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x0a, - 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x1a, 0x3d, 0x0a, - 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x37, 0x0a, 0x14, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x40, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xf9, 0x02, 0x0a, 0x1c, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 
0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, - 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x22, 0xbd, 0x01, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x1a, 0x60, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x94, 0x02, 0x0a, 0x04, 0x44, 0x69, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x75, 0x75, 
0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x72, 0x65, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, - 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x72, 0x65, 0x65, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x66, 0x72, - 0x65, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x89, 0x01, 0x0a, 0x11, 0x44, - 0x69, 0x73, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x65, 0x12, 0x48, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x30, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x43, 0x0a, 0x20, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc4, 0x02, 0x0a, 0x1b, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 
0x68, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x72, 0x65, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x63, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x1a, 0x3d, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x37, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x40, 0x0a, 0x1b, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xf9, 0x02, + 0x0a, 0x1c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x69, 0x73, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, + 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x65, 
0x73, 0x74, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, + 0x73, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x55, 0x72, 0x6c, 0x12, + 0x38, 0x0a, 0x18, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x16, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x69, 0x6e, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0xbd, 0x01, 0x0a, 0x15, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x60, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x94, 0x02, 0x0a, 0x04, 0x44, 0x69, + 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x66, 0x72, 0x65, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x66, 0x72, 0x65, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x66, 0x72, 0x65, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x12, 
0x21, 0x0a, + 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x69, 0x7a, 0x65, + 0x22, 0x89, 0x01, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, + 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2d, 0x0a, 0x0e, + 0x44, 0x69, 0x73, 0x6b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x44, + 0x69, 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, - 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, - 0x69, 0x73, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x2d, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x6b, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, - 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x6b, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, - 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, - 0x69, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, - 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, - 0x55, 0x75, 0x69, 0x64, 0x2a, 0x26, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, - 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, 0x57, 0x4f, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x52, - 0x57, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x87, 0x1a, 0x0a, - 0x0b, 0x53, 0x50, 0x44, 0x4b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0d, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, - 0x72, 0x65, 0x61, 
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, - 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x46, - 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x47, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x12, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x49, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x12, 0x4c, 0x6f, + 0x67, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0x2a, 0x0a, 0x12, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, + 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x22, 0x2b, 0x0a, 0x13, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, + 0x2b, 0x0a, 0x13, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x2a, 0x26, 0x0a, 0x0b, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, 0x57, + 0x4f, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x52, 0x57, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x45, + 0x52, 0x52, 0x10, 0x02, 0x32, 0x99, 0x1c, 0x0a, 0x0b, 0x53, 0x50, 0x44, 0x4b, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 
0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x46, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, + 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x73, + 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x43, 0x0a, 0x15, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, + 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, + 0x49, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, + 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x49, 0x0a, 0x15, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, + 0x65, 0x72, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x73, + 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x12, 0x49, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, - 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x74, - 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x60, + 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x29, 0x2e, 0x73, 0x70, + 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, + 0x12, 0x62, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x12, 0x2a, + 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x46, 0x69, 0x6e, + 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x60, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x29, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, - 0x72, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x41, 0x74, 0x74, 0x61, + 0x63, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, + 0x63, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, - 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, - 0x6e, 0x67, 0x53, 0x72, 0x63, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 
0x6e, 0x67, 0x53, 0x72, 0x63, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x1a, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, - 0x67, 0x53, 0x72, 0x63, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x52, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x1a, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, + 0x68, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, + 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x53, 0x68, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x62, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x12, 0x2a, - 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x72, 0x63, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, 0x68, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, - 0x70, 0x79, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, 0x68, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x70, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x74, + 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x29, 0x2e, 0x73, 0x70, + 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, + 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 
0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, + 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x74, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x29, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, - 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x1a, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, - 0x67, 0x44, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x56, 0x0a, 0x22, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, - 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x56, 0x0a, 0x22, 0x52, 0x65, 0x70, 0x6c, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x22, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x12, 0x18, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x54, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, - 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x12, 0x1c, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x14, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x12, 0x56, 0x0a, 0x22, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x54, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, + 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, + 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x2e, 0x73, + 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x64, + 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, + 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x37, 0x0a, 0x09, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, + 0x0a, 0x14, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x14, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, + 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x73, - 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, + 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x57, + 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5c, 0x0a, 0x11, 0x45, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x21, 0x2e, + 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x73, 0x70, 0x64, + 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, + 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x12, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x53, 0x0a, 0x12, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, + 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x23, 0x2e, + 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x19, 0x45, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 
0x12, 0x29, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, + 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x13, + 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x0c, 0x45, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, - 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x44, 0x0a, 0x0c, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x37, 0x0a, 0x09, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, 0x74, 0x12, 0x19, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x14, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, - 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x12, 0x48, 0x0a, 0x14, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x52, 0x65, 0x76, 0x65, 0x72, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, + 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, + 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x12, 0x40, 0x0a, + 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x70, + 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x31, 0x0a, 0x07, 0x44, 0x69, 0x73, 0x6b, 0x47, 0x65, 0x74, 0x12, 0x17, 0x2e, 0x73, 0x70, 0x64, + 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, + 0x73, 0x6b, 0x12, 0x42, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x53, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, + 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, + 0x4c, 0x6f, 0x67, 0x53, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0a, 0x45, 0x6e, - 0x67, 0x69, 0x6e, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x1b, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, - 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, - 0x0b, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x30, 0x01, - 0x12, 0x5c, 0x0a, 0x11, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x21, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4c, 0x69, 0x73, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, - 0x0a, 0x10, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, - 0x64, 0x64, 0x12, 0x20, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 
0x67, - 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x54, - 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x12, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, - 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x12, 0x45, 0x6e, 0x67, - 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, - 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, - 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x23, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, - 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x60, 0x0a, 0x19, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x12, - 0x29, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6e, - 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x13, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0a, - 0x44, 0x69, 0x73, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x64, - 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, - 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x12, 0x40, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x6b, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, - 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x07, 0x44, 0x69, 0x73, 0x6b, 0x47, - 0x65, 0x74, 0x12, 0x17, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, - 0x6b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x73, 0x70, - 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x12, 0x4a, 0x0a, 0x10, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x47, 0x65, 0x74, 0x12, 0x16, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0b, 0x4c, 0x6f, + 0x67, 0x47, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x67, 0x47, + 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x43, 0x0a, 0x0b, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, - 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, - 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2d, 0x73, 0x70, 0x64, 0x6b, 0x2d, 0x65, 0x6e, 0x67, 0x69, - 0x6e, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, + 0x2e, 0x4c, 0x6f, 0x67, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x47, 0x65, 0x74, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x1e, 0x2e, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, + 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x68, 0x6f, 0x72, 0x6e, + 0x2d, 0x73, 0x70, 0x64, 0x6b, 0x2d, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x64, 0x6b, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -3755,7 +3992,7 @@ func file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawD } var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes = make([]protoimpl.MessageInfo, 57) +var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes = make([]protoimpl.MessageInfo, 61) var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_goTypes = []interface{}{ (ReplicaMode)(0), // 0: spdkrpc.ReplicaMode (*Lvol)(nil), // 1: spdkrpc.Lvol @@ -3801,40 +4038,44 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_goTyp (*DiskCreateRequest)(nil), // 41: spdkrpc.DiskCreateRequest (*DiskGetRequest)(nil), // 42: spdkrpc.DiskGetRequest (*DiskDeleteRequest)(nil), // 43: spdkrpc.DiskDeleteRequest - nil, // 44: spdkrpc.Lvol.ChildrenEntry - nil, // 45: spdkrpc.Replica.SnapshotsEntry - nil, // 46: spdkrpc.ReplicaListResponse.ReplicasEntry - nil, // 47: spdkrpc.Engine.ReplicaAddressMapEntry - nil, // 48: spdkrpc.Engine.ReplicaModeMapEntry - nil, // 49: spdkrpc.Engine.SnapshotsEntry - nil, // 50: spdkrpc.EngineCreateRequest.ReplicaAddressMapEntry - nil, // 51: spdkrpc.EngineListResponse.EnginesEntry - nil, // 52: spdkrpc.EngineReplicaListResponse.ReplicasEntry - nil, // 53: spdkrpc.BackupCreateRequest.CredentialEntry - nil, // 54: spdkrpc.EngineBackupRestoreRequest.CredentialEntry - nil, // 55: spdkrpc.EngineBackupRestoreResponse.ErrorsEntry - nil, // 56: spdkrpc.ReplicaBackupRestoreRequest.CredentialEntry - nil, // 57: spdkrpc.RestoreStatusResponse.StatusEntry - (*emptypb.Empty)(nil), // 58: google.protobuf.Empty + (*LogSetLevelRequest)(nil), // 44: spdkrpc.LogSetLevelRequest + (*LogSetFlagsRequest)(nil), // 45: spdkrpc.LogSetFlagsRequest + (*LogGetLevelResponse)(nil), // 46: spdkrpc.LogGetLevelResponse + (*LogGetFlagsResponse)(nil), // 47: spdkrpc.LogGetFlagsResponse + nil, // 48: spdkrpc.Lvol.ChildrenEntry + nil, // 49: spdkrpc.Replica.SnapshotsEntry + nil, // 50: spdkrpc.ReplicaListResponse.ReplicasEntry + nil, // 51: spdkrpc.Engine.ReplicaAddressMapEntry + nil, // 52: spdkrpc.Engine.ReplicaModeMapEntry + nil, // 53: spdkrpc.Engine.SnapshotsEntry + nil, // 54: spdkrpc.EngineCreateRequest.ReplicaAddressMapEntry + nil, // 55: spdkrpc.EngineListResponse.EnginesEntry + nil, // 56: spdkrpc.EngineReplicaListResponse.ReplicasEntry + nil, // 57: spdkrpc.BackupCreateRequest.CredentialEntry + nil, // 58: spdkrpc.EngineBackupRestoreRequest.CredentialEntry + nil, // 59: spdkrpc.EngineBackupRestoreResponse.ErrorsEntry + nil, // 60: spdkrpc.ReplicaBackupRestoreRequest.CredentialEntry + nil, // 61: spdkrpc.RestoreStatusResponse.StatusEntry + (*emptypb.Empty)(nil), // 62: google.protobuf.Empty } var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_depIdxs = []int32{ - 44, // 0: spdkrpc.Lvol.children:type_name -> spdkrpc.Lvol.ChildrenEntry + 48, // 0: spdkrpc.Lvol.children:type_name -> spdkrpc.Lvol.ChildrenEntry 1, // 1: spdkrpc.Replica.head:type_name -> spdkrpc.Lvol - 45, // 2: spdkrpc.Replica.snapshots:type_name -> spdkrpc.Replica.SnapshotsEntry - 46, // 3: spdkrpc.ReplicaListResponse.replicas:type_name -> spdkrpc.ReplicaListResponse.ReplicasEntry - 47, // 4: spdkrpc.Engine.replica_address_map:type_name -> spdkrpc.Engine.ReplicaAddressMapEntry - 48, 
// 5: spdkrpc.Engine.replica_mode_map:type_name -> spdkrpc.Engine.ReplicaModeMapEntry + 49, // 2: spdkrpc.Replica.snapshots:type_name -> spdkrpc.Replica.SnapshotsEntry + 50, // 3: spdkrpc.ReplicaListResponse.replicas:type_name -> spdkrpc.ReplicaListResponse.ReplicasEntry + 51, // 4: spdkrpc.Engine.replica_address_map:type_name -> spdkrpc.Engine.ReplicaAddressMapEntry + 52, // 5: spdkrpc.Engine.replica_mode_map:type_name -> spdkrpc.Engine.ReplicaModeMapEntry 1, // 6: spdkrpc.Engine.head:type_name -> spdkrpc.Lvol - 49, // 7: spdkrpc.Engine.snapshots:type_name -> spdkrpc.Engine.SnapshotsEntry - 50, // 8: spdkrpc.EngineCreateRequest.replica_address_map:type_name -> spdkrpc.EngineCreateRequest.ReplicaAddressMapEntry - 51, // 9: spdkrpc.EngineListResponse.engines:type_name -> spdkrpc.EngineListResponse.EnginesEntry - 52, // 10: spdkrpc.EngineReplicaListResponse.replicas:type_name -> spdkrpc.EngineReplicaListResponse.ReplicasEntry + 53, // 7: spdkrpc.Engine.snapshots:type_name -> spdkrpc.Engine.SnapshotsEntry + 54, // 8: spdkrpc.EngineCreateRequest.replica_address_map:type_name -> spdkrpc.EngineCreateRequest.ReplicaAddressMapEntry + 55, // 9: spdkrpc.EngineListResponse.engines:type_name -> spdkrpc.EngineListResponse.EnginesEntry + 56, // 10: spdkrpc.EngineReplicaListResponse.replicas:type_name -> spdkrpc.EngineReplicaListResponse.ReplicasEntry 26, // 11: spdkrpc.VersionDetailGetReply.version:type_name -> spdkrpc.VersionOutput - 53, // 12: spdkrpc.BackupCreateRequest.credential:type_name -> spdkrpc.BackupCreateRequest.CredentialEntry - 54, // 13: spdkrpc.EngineBackupRestoreRequest.credential:type_name -> spdkrpc.EngineBackupRestoreRequest.CredentialEntry - 55, // 14: spdkrpc.EngineBackupRestoreResponse.errors:type_name -> spdkrpc.EngineBackupRestoreResponse.ErrorsEntry - 56, // 15: spdkrpc.ReplicaBackupRestoreRequest.credential:type_name -> spdkrpc.ReplicaBackupRestoreRequest.CredentialEntry - 57, // 16: spdkrpc.RestoreStatusResponse.status:type_name -> spdkrpc.RestoreStatusResponse.StatusEntry + 57, // 12: spdkrpc.BackupCreateRequest.credential:type_name -> spdkrpc.BackupCreateRequest.CredentialEntry + 58, // 13: spdkrpc.EngineBackupRestoreRequest.credential:type_name -> spdkrpc.EngineBackupRestoreRequest.CredentialEntry + 59, // 14: spdkrpc.EngineBackupRestoreResponse.errors:type_name -> spdkrpc.EngineBackupRestoreResponse.ErrorsEntry + 60, // 15: spdkrpc.ReplicaBackupRestoreRequest.credential:type_name -> spdkrpc.ReplicaBackupRestoreRequest.CredentialEntry + 61, // 16: spdkrpc.RestoreStatusResponse.status:type_name -> spdkrpc.RestoreStatusResponse.StatusEntry 1, // 17: spdkrpc.Replica.SnapshotsEntry.value:type_name -> spdkrpc.Lvol 2, // 18: spdkrpc.ReplicaListResponse.ReplicasEntry.value:type_name -> spdkrpc.Replica 0, // 19: spdkrpc.Engine.ReplicaModeMapEntry.value:type_name -> spdkrpc.ReplicaMode @@ -3848,8 +4089,8 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_depId 24, // 27: spdkrpc.SPDKService.ReplicaSnapshotCreate:input_type -> spdkrpc.SnapshotRequest 24, // 28: spdkrpc.SPDKService.ReplicaSnapshotDelete:input_type -> spdkrpc.SnapshotRequest 24, // 29: spdkrpc.SPDKService.ReplicaSnapshotRevert:input_type -> spdkrpc.SnapshotRequest - 58, // 30: spdkrpc.SPDKService.ReplicaList:input_type -> google.protobuf.Empty - 58, // 31: spdkrpc.SPDKService.ReplicaWatch:input_type -> google.protobuf.Empty + 62, // 30: spdkrpc.SPDKService.ReplicaList:input_type -> google.protobuf.Empty + 62, // 31: spdkrpc.SPDKService.ReplicaWatch:input_type -> google.protobuf.Empty 7, // 
32: spdkrpc.SPDKService.ReplicaRebuildingSrcStart:input_type -> spdkrpc.ReplicaRebuildingSrcStartRequest 8, // 33: spdkrpc.SPDKService.ReplicaRebuildingSrcFinish:input_type -> spdkrpc.ReplicaRebuildingSrcFinishRequest 9, // 34: spdkrpc.SPDKService.ReplicaRebuildingSrcAttach:input_type -> spdkrpc.ReplicaRebuildingSrcAttachRequest @@ -3869,8 +4110,8 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_depId 24, // 48: spdkrpc.SPDKService.EngineSnapshotCreate:input_type -> spdkrpc.SnapshotRequest 24, // 49: spdkrpc.SPDKService.EngineSnapshotDelete:input_type -> spdkrpc.SnapshotRequest 24, // 50: spdkrpc.SPDKService.EngineSnapshotRevert:input_type -> spdkrpc.SnapshotRequest - 58, // 51: spdkrpc.SPDKService.EngineList:input_type -> google.protobuf.Empty - 58, // 52: spdkrpc.SPDKService.EngineWatch:input_type -> google.protobuf.Empty + 62, // 51: spdkrpc.SPDKService.EngineList:input_type -> google.protobuf.Empty + 62, // 52: spdkrpc.SPDKService.EngineWatch:input_type -> google.protobuf.Empty 21, // 53: spdkrpc.SPDKService.EngineReplicaList:input_type -> spdkrpc.EngineReplicaListRequest 20, // 54: spdkrpc.SPDKService.EngineReplicaAdd:input_type -> spdkrpc.EngineReplicaAddRequest 23, // 55: spdkrpc.SPDKService.EngineReplicaDelete:input_type -> spdkrpc.EngineReplicaDeleteRequest @@ -3882,50 +4123,58 @@ var file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_depId 41, // 61: spdkrpc.SPDKService.DiskCreate:input_type -> spdkrpc.DiskCreateRequest 43, // 62: spdkrpc.SPDKService.DiskDelete:input_type -> spdkrpc.DiskDeleteRequest 42, // 63: spdkrpc.SPDKService.DiskGet:input_type -> spdkrpc.DiskGetRequest - 58, // 64: spdkrpc.SPDKService.VersionDetailGet:input_type -> google.protobuf.Empty - 2, // 65: spdkrpc.SPDKService.ReplicaCreate:output_type -> spdkrpc.Replica - 58, // 66: spdkrpc.SPDKService.ReplicaDelete:output_type -> google.protobuf.Empty - 2, // 67: spdkrpc.SPDKService.ReplicaGet:output_type -> spdkrpc.Replica - 2, // 68: spdkrpc.SPDKService.ReplicaSnapshotCreate:output_type -> spdkrpc.Replica - 58, // 69: spdkrpc.SPDKService.ReplicaSnapshotDelete:output_type -> google.protobuf.Empty - 58, // 70: spdkrpc.SPDKService.ReplicaSnapshotRevert:output_type -> google.protobuf.Empty - 6, // 71: spdkrpc.SPDKService.ReplicaList:output_type -> spdkrpc.ReplicaListResponse - 58, // 72: spdkrpc.SPDKService.ReplicaWatch:output_type -> google.protobuf.Empty - 58, // 73: spdkrpc.SPDKService.ReplicaRebuildingSrcStart:output_type -> google.protobuf.Empty - 58, // 74: spdkrpc.SPDKService.ReplicaRebuildingSrcFinish:output_type -> google.protobuf.Empty - 58, // 75: spdkrpc.SPDKService.ReplicaRebuildingSrcAttach:output_type -> google.protobuf.Empty - 58, // 76: spdkrpc.SPDKService.ReplicaRebuildingSrcDetach:output_type -> google.protobuf.Empty - 58, // 77: spdkrpc.SPDKService.ReplicaSnapshotShallowCopy:output_type -> google.protobuf.Empty - 13, // 78: spdkrpc.SPDKService.ReplicaRebuildingDstStart:output_type -> spdkrpc.ReplicaRebuildingDstStartResponse - 58, // 79: spdkrpc.SPDKService.ReplicaRebuildingDstFinish:output_type -> google.protobuf.Empty - 58, // 80: spdkrpc.SPDKService.ReplicaRebuildingDstSnapshotCreate:output_type -> google.protobuf.Empty - 58, // 81: spdkrpc.SPDKService.ReplicaRebuildingDstSnapshotRevert:output_type -> google.protobuf.Empty - 29, // 82: spdkrpc.SPDKService.ReplicaBackupCreate:output_type -> spdkrpc.BackupCreateResponse - 31, // 83: spdkrpc.SPDKService.ReplicaBackupStatus:output_type -> spdkrpc.BackupStatusResponse - 58, // 84: 
spdkrpc.SPDKService.ReplicaBackupRestore:output_type -> google.protobuf.Empty - 38, // 85: spdkrpc.SPDKService.ReplicaRestoreStatus:output_type -> spdkrpc.ReplicaRestoreStatusResponse - 15, // 86: spdkrpc.SPDKService.EngineCreate:output_type -> spdkrpc.Engine - 58, // 87: spdkrpc.SPDKService.EngineDelete:output_type -> google.protobuf.Empty - 15, // 88: spdkrpc.SPDKService.EngineGet:output_type -> spdkrpc.Engine - 25, // 89: spdkrpc.SPDKService.EngineSnapshotCreate:output_type -> spdkrpc.SnapshotResponse - 58, // 90: spdkrpc.SPDKService.EngineSnapshotDelete:output_type -> google.protobuf.Empty - 58, // 91: spdkrpc.SPDKService.EngineSnapshotRevert:output_type -> google.protobuf.Empty - 19, // 92: spdkrpc.SPDKService.EngineList:output_type -> spdkrpc.EngineListResponse - 58, // 93: spdkrpc.SPDKService.EngineWatch:output_type -> google.protobuf.Empty - 22, // 94: spdkrpc.SPDKService.EngineReplicaList:output_type -> spdkrpc.EngineReplicaListResponse - 58, // 95: spdkrpc.SPDKService.EngineReplicaAdd:output_type -> google.protobuf.Empty - 58, // 96: spdkrpc.SPDKService.EngineReplicaDelete:output_type -> google.protobuf.Empty - 29, // 97: spdkrpc.SPDKService.EngineBackupCreate:output_type -> spdkrpc.BackupCreateResponse - 31, // 98: spdkrpc.SPDKService.EngineBackupStatus:output_type -> spdkrpc.BackupStatusResponse - 33, // 99: spdkrpc.SPDKService.EngineBackupRestore:output_type -> spdkrpc.EngineBackupRestoreResponse - 58, // 100: spdkrpc.SPDKService.EngineBackupRestoreFinish:output_type -> google.protobuf.Empty - 39, // 101: spdkrpc.SPDKService.EngineRestoreStatus:output_type -> spdkrpc.RestoreStatusResponse - 40, // 102: spdkrpc.SPDKService.DiskCreate:output_type -> spdkrpc.Disk - 58, // 103: spdkrpc.SPDKService.DiskDelete:output_type -> google.protobuf.Empty - 40, // 104: spdkrpc.SPDKService.DiskGet:output_type -> spdkrpc.Disk - 27, // 105: spdkrpc.SPDKService.VersionDetailGet:output_type -> spdkrpc.VersionDetailGetReply - 65, // [65:106] is the sub-list for method output_type - 24, // [24:65] is the sub-list for method input_type + 44, // 64: spdkrpc.SPDKService.LogSetLevel:input_type -> spdkrpc.LogSetLevelRequest + 45, // 65: spdkrpc.SPDKService.LogSetFlags:input_type -> spdkrpc.LogSetFlagsRequest + 62, // 66: spdkrpc.SPDKService.LogGetLevel:input_type -> google.protobuf.Empty + 62, // 67: spdkrpc.SPDKService.LogGetFlags:input_type -> google.protobuf.Empty + 62, // 68: spdkrpc.SPDKService.VersionDetailGet:input_type -> google.protobuf.Empty + 2, // 69: spdkrpc.SPDKService.ReplicaCreate:output_type -> spdkrpc.Replica + 62, // 70: spdkrpc.SPDKService.ReplicaDelete:output_type -> google.protobuf.Empty + 2, // 71: spdkrpc.SPDKService.ReplicaGet:output_type -> spdkrpc.Replica + 2, // 72: spdkrpc.SPDKService.ReplicaSnapshotCreate:output_type -> spdkrpc.Replica + 62, // 73: spdkrpc.SPDKService.ReplicaSnapshotDelete:output_type -> google.protobuf.Empty + 62, // 74: spdkrpc.SPDKService.ReplicaSnapshotRevert:output_type -> google.protobuf.Empty + 6, // 75: spdkrpc.SPDKService.ReplicaList:output_type -> spdkrpc.ReplicaListResponse + 62, // 76: spdkrpc.SPDKService.ReplicaWatch:output_type -> google.protobuf.Empty + 62, // 77: spdkrpc.SPDKService.ReplicaRebuildingSrcStart:output_type -> google.protobuf.Empty + 62, // 78: spdkrpc.SPDKService.ReplicaRebuildingSrcFinish:output_type -> google.protobuf.Empty + 62, // 79: spdkrpc.SPDKService.ReplicaRebuildingSrcAttach:output_type -> google.protobuf.Empty + 62, // 80: spdkrpc.SPDKService.ReplicaRebuildingSrcDetach:output_type -> google.protobuf.Empty + 62, // 
81: spdkrpc.SPDKService.ReplicaSnapshotShallowCopy:output_type -> google.protobuf.Empty + 13, // 82: spdkrpc.SPDKService.ReplicaRebuildingDstStart:output_type -> spdkrpc.ReplicaRebuildingDstStartResponse + 62, // 83: spdkrpc.SPDKService.ReplicaRebuildingDstFinish:output_type -> google.protobuf.Empty + 62, // 84: spdkrpc.SPDKService.ReplicaRebuildingDstSnapshotCreate:output_type -> google.protobuf.Empty + 62, // 85: spdkrpc.SPDKService.ReplicaRebuildingDstSnapshotRevert:output_type -> google.protobuf.Empty + 29, // 86: spdkrpc.SPDKService.ReplicaBackupCreate:output_type -> spdkrpc.BackupCreateResponse + 31, // 87: spdkrpc.SPDKService.ReplicaBackupStatus:output_type -> spdkrpc.BackupStatusResponse + 62, // 88: spdkrpc.SPDKService.ReplicaBackupRestore:output_type -> google.protobuf.Empty + 38, // 89: spdkrpc.SPDKService.ReplicaRestoreStatus:output_type -> spdkrpc.ReplicaRestoreStatusResponse + 15, // 90: spdkrpc.SPDKService.EngineCreate:output_type -> spdkrpc.Engine + 62, // 91: spdkrpc.SPDKService.EngineDelete:output_type -> google.protobuf.Empty + 15, // 92: spdkrpc.SPDKService.EngineGet:output_type -> spdkrpc.Engine + 25, // 93: spdkrpc.SPDKService.EngineSnapshotCreate:output_type -> spdkrpc.SnapshotResponse + 62, // 94: spdkrpc.SPDKService.EngineSnapshotDelete:output_type -> google.protobuf.Empty + 62, // 95: spdkrpc.SPDKService.EngineSnapshotRevert:output_type -> google.protobuf.Empty + 19, // 96: spdkrpc.SPDKService.EngineList:output_type -> spdkrpc.EngineListResponse + 62, // 97: spdkrpc.SPDKService.EngineWatch:output_type -> google.protobuf.Empty + 22, // 98: spdkrpc.SPDKService.EngineReplicaList:output_type -> spdkrpc.EngineReplicaListResponse + 62, // 99: spdkrpc.SPDKService.EngineReplicaAdd:output_type -> google.protobuf.Empty + 62, // 100: spdkrpc.SPDKService.EngineReplicaDelete:output_type -> google.protobuf.Empty + 29, // 101: spdkrpc.SPDKService.EngineBackupCreate:output_type -> spdkrpc.BackupCreateResponse + 31, // 102: spdkrpc.SPDKService.EngineBackupStatus:output_type -> spdkrpc.BackupStatusResponse + 33, // 103: spdkrpc.SPDKService.EngineBackupRestore:output_type -> spdkrpc.EngineBackupRestoreResponse + 62, // 104: spdkrpc.SPDKService.EngineBackupRestoreFinish:output_type -> google.protobuf.Empty + 39, // 105: spdkrpc.SPDKService.EngineRestoreStatus:output_type -> spdkrpc.RestoreStatusResponse + 40, // 106: spdkrpc.SPDKService.DiskCreate:output_type -> spdkrpc.Disk + 62, // 107: spdkrpc.SPDKService.DiskDelete:output_type -> google.protobuf.Empty + 40, // 108: spdkrpc.SPDKService.DiskGet:output_type -> spdkrpc.Disk + 62, // 109: spdkrpc.SPDKService.LogSetLevel:output_type -> google.protobuf.Empty + 62, // 110: spdkrpc.SPDKService.LogSetFlags:output_type -> google.protobuf.Empty + 46, // 111: spdkrpc.SPDKService.LogGetLevel:output_type -> spdkrpc.LogGetLevelResponse + 47, // 112: spdkrpc.SPDKService.LogGetFlags:output_type -> spdkrpc.LogGetFlagsResponse + 27, // 113: spdkrpc.SPDKService.VersionDetailGet:output_type -> spdkrpc.VersionDetailGetReply + 69, // [69:114] is the sub-list for method output_type + 24, // [24:69] is the sub-list for method input_type 24, // [24:24] is the sub-list for extension type_name 24, // [24:24] is the sub-list for extension extendee 0, // [0:24] is the sub-list for field type_name @@ -4453,6 +4702,54 @@ func file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_init return nil } } + file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v 
:= v.(*LogSetLevelRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogSetFlagsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetLevelResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogGetFlagsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -4460,7 +4757,7 @@ func file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_init GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_github_com_longhorn_longhorn_spdk_engine_proto_spdkrpc_spdk_proto_rawDesc, NumEnums: 1, - NumMessages: 57, + NumMessages: 61, NumExtensions: 0, NumServices: 1, }, @@ -4527,6 +4824,10 @@ type SPDKServiceClient interface { DiskCreate(ctx context.Context, in *DiskCreateRequest, opts ...grpc.CallOption) (*Disk, error) DiskDelete(ctx context.Context, in *DiskDeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) DiskGet(ctx context.Context, in *DiskGetRequest, opts ...grpc.CallOption) (*Disk, error) + LogSetLevel(ctx context.Context, in *LogSetLevelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + LogSetFlags(ctx context.Context, in *LogSetFlagsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + LogGetLevel(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*LogGetLevelResponse, error) + LogGetFlags(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*LogGetFlagsResponse, error) VersionDetailGet(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionDetailGetReply, error) } @@ -4944,6 +5245,42 @@ func (c *sPDKServiceClient) DiskGet(ctx context.Context, in *DiskGetRequest, opt return out, nil } +func (c *sPDKServiceClient) LogSetLevel(ctx context.Context, in *LogSetLevelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/spdkrpc.SPDKService/LogSetLevel", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sPDKServiceClient) LogSetFlags(ctx context.Context, in *LogSetFlagsRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/spdkrpc.SPDKService/LogSetFlags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sPDKServiceClient) LogGetLevel(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*LogGetLevelResponse, error) { + out := new(LogGetLevelResponse) + err := c.cc.Invoke(ctx, "/spdkrpc.SPDKService/LogGetLevel", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *sPDKServiceClient) LogGetFlags(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*LogGetFlagsResponse, error) { + out := new(LogGetFlagsResponse) + err := c.cc.Invoke(ctx, "/spdkrpc.SPDKService/LogGetFlags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *sPDKServiceClient) VersionDetailGet(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionDetailGetReply, error) { out := new(VersionDetailGetReply) err := c.cc.Invoke(ctx, "/spdkrpc.SPDKService/VersionDetailGet", in, out, opts...) @@ -4995,6 +5332,10 @@ type SPDKServiceServer interface { DiskCreate(context.Context, *DiskCreateRequest) (*Disk, error) DiskDelete(context.Context, *DiskDeleteRequest) (*emptypb.Empty, error) DiskGet(context.Context, *DiskGetRequest) (*Disk, error) + LogSetLevel(context.Context, *LogSetLevelRequest) (*emptypb.Empty, error) + LogSetFlags(context.Context, *LogSetFlagsRequest) (*emptypb.Empty, error) + LogGetLevel(context.Context, *emptypb.Empty) (*LogGetLevelResponse, error) + LogGetFlags(context.Context, *emptypb.Empty) (*LogGetFlagsResponse, error) VersionDetailGet(context.Context, *emptypb.Empty) (*VersionDetailGetReply, error) } @@ -5122,6 +5463,18 @@ func (*UnimplementedSPDKServiceServer) DiskDelete(context.Context, *DiskDeleteRe func (*UnimplementedSPDKServiceServer) DiskGet(context.Context, *DiskGetRequest) (*Disk, error) { return nil, status.Errorf(codes.Unimplemented, "method DiskGet not implemented") } +func (*UnimplementedSPDKServiceServer) LogSetLevel(context.Context, *LogSetLevelRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogSetLevel not implemented") +} +func (*UnimplementedSPDKServiceServer) LogSetFlags(context.Context, *LogSetFlagsRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogSetFlags not implemented") +} +func (*UnimplementedSPDKServiceServer) LogGetLevel(context.Context, *emptypb.Empty) (*LogGetLevelResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogGetLevel not implemented") +} +func (*UnimplementedSPDKServiceServer) LogGetFlags(context.Context, *emptypb.Empty) (*LogGetFlagsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LogGetFlags not implemented") +} func (*UnimplementedSPDKServiceServer) VersionDetailGet(context.Context, *emptypb.Empty) (*VersionDetailGetReply, error) { return nil, status.Errorf(codes.Unimplemented, "method VersionDetailGet not implemented") } @@ -5856,6 +6209,78 @@ func _SPDKService_DiskGet_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +func _SPDKService_LogSetLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LogSetLevelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SPDKServiceServer).LogSetLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spdkrpc.SPDKService/LogSetLevel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SPDKServiceServer).LogSetLevel(ctx, req.(*LogSetLevelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SPDKService_LogSetFlags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(LogSetFlagsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SPDKServiceServer).LogSetFlags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spdkrpc.SPDKService/LogSetFlags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SPDKServiceServer).LogSetFlags(ctx, req.(*LogSetFlagsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SPDKService_LogGetLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SPDKServiceServer).LogGetLevel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spdkrpc.SPDKService/LogGetLevel", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SPDKServiceServer).LogGetLevel(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SPDKService_LogGetFlags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SPDKServiceServer).LogGetFlags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spdkrpc.SPDKService/LogGetFlags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SPDKServiceServer).LogGetFlags(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _SPDKService_VersionDetailGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(emptypb.Empty) if err := dec(in); err != nil { @@ -6030,6 +6455,22 @@ var _SPDKService_serviceDesc = grpc.ServiceDesc{ MethodName: "DiskGet", Handler: _SPDKService_DiskGet_Handler, }, + { + MethodName: "LogSetLevel", + Handler: _SPDKService_LogSetLevel_Handler, + }, + { + MethodName: "LogSetFlags", + Handler: _SPDKService_LogSetFlags_Handler, + }, + { + MethodName: "LogGetLevel", + Handler: _SPDKService_LogGetLevel_Handler, + }, + { + MethodName: "LogGetFlags", + Handler: _SPDKService_LogGetFlags_Handler, + }, { MethodName: "VersionDetailGet", Handler: _SPDKService_VersionDetailGet_Handler, diff --git a/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.proto b/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.proto index ab85af2ca0b..730ad6b2a12 100644 --- a/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.proto +++ b/vendor/github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc/spdk.proto @@ -50,6 +50,11 @@ service SPDKService { rpc DiskDelete(DiskDeleteRequest) returns (google.protobuf.Empty); rpc DiskGet(DiskGetRequest) returns (Disk); + rpc LogSetLevel(LogSetLevelRequest) returns (google.protobuf.Empty); + rpc LogSetFlags(LogSetFlagsRequest) returns (google.protobuf.Empty); + rpc LogGetLevel(google.protobuf.Empty) returns (LogGetLevelResponse); + rpc LogGetFlags(google.protobuf.Empty) returns (LogGetFlagsResponse); + rpc VersionDetailGet(google.protobuf.Empty) returns(VersionDetailGetReply); } @@ -61,6 +66,7 @@ message Lvol { string parent = 5; map children = 6; string creation_time = 7; + bool 
user_created = 8; } message Replica { @@ -210,6 +216,7 @@ message EngineReplicaDeleteRequest { message SnapshotRequest { string name = 1; string snapshot_name = 2; + bool user_created = 3; } message SnapshotResponse { @@ -351,3 +358,19 @@ message DiskDeleteRequest { string disk_name = 1; string disk_uuid = 2; } + +message LogSetLevelRequest { + string level = 1; +} + +message LogSetFlagsRequest { + string flags = 1; +} + +message LogGetLevelResponse { + string level = 1; +} + +message LogGetFlagsResponse { + string flags = 1; +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
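The new Log RPCs defined above (LogSetLevel, LogSetFlags, LogGetLevel, LogGetFlags) are ordinary unary methods on the existing SPDKService, so they can be exercised through the generated Go client like any other call. Below is a minimal sketch; the service address, the insecure transport, the "DEBUG" level string, and the NewSPDKServiceClient constructor name (the conventional generated constructor, not visible in this hunk) are illustrative assumptions rather than part of the change.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// The address and insecure transport are illustrative assumptions.
	conn, err := grpc.Dial("localhost:8504", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial SPDK service: %v", err)
	}
	defer conn.Close()

	// NewSPDKServiceClient is the conventional generated constructor for SPDKServiceClient.
	client := spdkrpc.NewSPDKServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Raise the SPDK log level; the request carries the level as a plain string
	// (accepted values are whatever the SPDK target understands, e.g. "DEBUG").
	if _, err := client.LogSetLevel(ctx, &spdkrpc.LogSetLevelRequest{Level: "DEBUG"}); err != nil {
		log.Fatalf("set log level: %v", err)
	}

	// Read the level back; LogGetLevel takes google.protobuf.Empty.
	resp, err := client.LogGetLevel(ctx, &emptypb.Empty{})
	if err != nil {
		log.Fatalf("get log level: %v", err)
	}
	log.Printf("current SPDK log level: %s", resp.GetLevel())
}
```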
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b72e7..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946bb9..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be214370d..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636aac..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad226f..00000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1feba62c6c9..b5c8bcb395a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -475,6 +475,9 @@ type HistogramOpts struct { // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. @@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.now == nil { opts.now = time.Now } - + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, lastResetTime: opts.now(), now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -716,9 +722,16 @@ type histogram struct { nativeHistogramMinResetDuration time.Duration // lastResetTime is protected by mtx. It is also used as created timestamp. lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. 
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. func (h *histogram) maybeReset( hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, ) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interefere if a reset is already scheduled. h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } @@ -906,6 +929,29 @@ func (h *histogram) maybeReset( return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index b3c4eca2bc1..c21911f292d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb613..8c1136ceea3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
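For context on the histogram.go hunk above: the newly scheduled reset only comes into play for native histograms that set NativeHistogramMinResetDuration together with a bucket limit. A rough sketch of such a configuration through the public client_golang API follows; the metric name, bucket factor, bucket limit, and duration are made-up values for illustration.

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A native (sparse) histogram: a bucket factor > 1 enables native buckets,
	// NativeHistogramMaxBucketNumber caps their count, and
	// NativeHistogramMinResetDuration bounds how often the whole histogram may
	// be reset when the bucket-limiting strategies kick in.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "example_request_duration_seconds", // illustrative name
		Help:                            "Request duration.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)

	h.Observe(0.042)
}
```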
-//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go similarity index 63% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go index c318385cbed..d8d9a6d7a2f 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -1,10 +1,9 @@ -// Copyright 2013 Matt T. Proud -// +// Copyright 2023 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,5 +11,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 84946b27035..cee360db7f3 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -474,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. 
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 90639781513..3597bfd93e3 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/model" ) @@ -72,8 +73,8 @@ func ResponseFormat(h http.Header) Format { // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: + switch format.FormatType() { + case TypeProtoDelim: return &protoDecoder{r: r} } return &textDecoder{r: r} @@ -86,8 +87,10 @@ type protoDecoder struct { // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffaad7..97ee673d944 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,12 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "github.com/prometheus/common/model" + dto "github.com/prometheus/client_model/go" ) @@ -60,23 +62,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. 
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } } - return FmtText + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,29 +95,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { - if ver == OpenMetricsVersion_1_0_0 { - return FmtOpenMetrics_1_0_0 + switch ver { + case OpenMetricsVersion_1_0_0: + return FmtOpenMetrics_1_0_0 + escapingScheme + default: + return FmtOpenMetrics_0_0_1 + escapingScheme } - return FmtOpenMetrics_0_0_1 } } - return FmtText + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -115,44 +137,48 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. 
func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, prototext.Format(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index c4cb20f0d3e..f9b6f70fe8a 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,10 +14,22 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( TextVersion = "0.0.4" ProtoType = `application/vnd.google.protobuf` @@ -27,7 +39,8 @@ const ( OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. FmtUnknown Format = `` FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` @@ -41,3 +54,93 @@ const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. 
As the number of Format permutations increases, doing basic string +// comparisons are not feasible, so this enum captures the most useful +// high-level attribute of the Format string. +type FormatType int + +const ( + TypeUnknown = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// FormatType deduces an overall FormatType for the given format. +func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + if len(toks) < 2 { + return TypeUnknown + } + + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. +func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 21cdddcf054..5622578ed6c 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -35,6 +35,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. // // Note that OpenMetrics requires a final `# EOF` line. 
Since this function acts @@ -98,7 +110,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, shortName) written += n if err != nil { return @@ -303,21 +315,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -365,27 +365,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. +func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -451,7 +482,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 2946b8f1a64..f9b8265a9ec 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -62,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. 
// +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks. @@ -98,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -124,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -280,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -330,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. 
+func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. + if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -462,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(model.LabelValue(name)) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9d73..26490211af2 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) - if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. 
- if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7ad2..178fdbaf615 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -90,13 +90,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index ef895633546..3317ce22ff7 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff name matches the pattern of LabelNameRE for legacy +// names, and iff it's valid UTF-8 if NameValidationScheme is set to +// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the +// check but a much faster hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false + switch NameValidationScheme { + case LegacyValidation: + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } } + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } return true } @@ -164,7 +172,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 00000000000..447ab8ad635 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7fedb..0bd29b3a3f6 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -18,15 +18,84 @@ import ( "regexp" "sort" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the method of name validation to be used by + // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode + // in isolation from other components that don't support UTF-8 may result in + // bugs or other undefined behavior. This value is intended to be set by + // UTF-8-aware binaries as part of their startup. To avoid need for locking, + // this value should be set once, ideally in an init(), before multiple + // goroutines are started. + NameValidationScheme = LegacyValidation + + // NameEscapingScheme defines the default way that names will be + // escaped when presented to systems that do not support UTF-8 names. If the + // Content-Type "escaping" term is specified, that will override this value. + NameEscapingScheme = ValueEncodingEscaping +) + +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requirets that metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. + LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation +) + +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. 
+ ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system. If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" ) +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet @@ -86,17 +155,302 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(n) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n LabelValue) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + } + + // If the name is nil, copy as-is, don't try to escape. 
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + return true + } + if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + return true + } + } + return false +} + +const ( + lowerhex = "0123456789abcdef" +) + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. +func EscapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + var escaped strings.Builder + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case DotsEscaping: + // Do not early return for legacy valid names, we still escape underscores. + for i, b := range name { + if b == '_' { + escaped.WriteString("__") + } else if b == '.' { + escaped.WriteString("_dot_") + } else if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else { + escaped.WriteRune('_') + } + } + return escaped.String() + case ValueEncodingEscaping: + if IsValidLegacyMetricName(LabelValue(name)) { + return name + } + escaped.WriteString("U__") + for i, b := range name { + if isValidLegacyRune(b, i) { + escaped.WriteRune(b) + } else if !utf8.ValidRune(b) { + escaped.WriteString("_FFFD_") + } else if b < 0x100 { + escaped.WriteRune('_') + for s := 4; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } else if b < 0x10000 { + escaped.WriteRune('_') + for s := 12; s >= 0; s -= 4 { + escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) + } + escaped.WriteRune('_') + } + } + return escaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +// lower function taken from strconv.atoi +func lower(c byte) byte { + return c | ('x' - 'X') +} + +// UnescapeName unescapes the incoming name according to the provided escaping +// scheme if possible. Some schemes are partially or totally non-roundtripable. 
+// If any error is enountered, returns the original input. +func UnescapeName(name string, scheme EscapingScheme) string { + if len(name) == 0 { + return name + } + switch scheme { + case NoEscaping: + return name + case UnderscoreEscaping: + // It is not possible to unescape from underscore replacement. + return name + case DotsEscaping: + name = strings.ReplaceAll(name, "_dot_", ".") + name = strings.ReplaceAll(name, "__", "_") + return name + case ValueEncodingEscaping: + escapedName, found := strings.CutPrefix(name, "U__") + if !found { + return name + } + + var unescaped strings.Builder + TOP: + for i := 0; i < len(escapedName); i++ { + // All non-underscores are treated normally. + if escapedName[i] != '_' { + unescaped.WriteByte(escapedName[i]) + continue + } + i++ + if i >= len(escapedName) { + return name + } + // A double underscore is a single underscore. + if escapedName[i] == '_' { + unescaped.WriteByte('_') + continue + } + // We think we are in a UTF-8 code, process it. + var utf8Val uint + for j := 0; i < len(escapedName); j++ { + // This is too many characters for a utf8 value. + if j > 4 { + return name + } + // Found a closing underscore, convert to a rune, check validity, and append. + if escapedName[i] == '_' { + utf8Rune := rune(utf8Val) + if !utf8.ValidRune(utf8Rune) { + return name + } + unescaped.WriteRune(utf8Rune) + continue TOP + } + r := lower(escapedName[i]) + utf8Val *= 16 + if r >= '0' && r <= '9' { + utf8Val += uint(r) - '0' + } else if r >= 'a' && r <= 'f' { + utf8Val += uint(r) - 'a' + 10 + } else { + return name + } + i++ + } + // Didn't find closing underscore, invalid. + return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme " + s) + } +} diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c63d..dc8a0026c48 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. 
(Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d2cc..910b0b71fcc 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb440413fd..8050637d822 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a70530..ae35cc2ab4b 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. 
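The prometheus/common update vendored above adds opt-in UTF-8 metric and label names (via the package-level NameValidationScheme) plus the EscapeName/UnescapeName helpers in model/metric.go. A minimal sketch of how a consumer might exercise those helpers, using only the API surface shown in the diff; the dotted metric name "http.requests.total" is a hypothetical example, not anything from this change:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical dotted metric name; '.' is outside the legacy charset.
	name := "http.requests.total"

	// The default scheme is LegacyValidation, so the dotted name is rejected.
	fmt.Println(model.IsValidMetricName(model.LabelValue(name))) // false

	// Opting into UTF-8 validation (set once, ideally at startup) accepts it as-is.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName(model.LabelValue(name))) // true

	// When exposing to systems that only understand legacy names, the name can
	// be escaped with one of the schemes introduced in model/metric.go.
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // http_requests_total
	fmt.Println(model.EscapeName(name, model.DotsEscaping))       // http_dot_requests_dot_total

	// ValueEncodingEscaping is round-trippable via UnescapeName.
	enc := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(enc)                                                  // U__http_2e_requests_2e_total
	fmt.Println(model.UnescapeName(enc, model.ValueEncodingEscaping)) // http.requests.total
}

On the exposition side, the text_create.go and openmetrics_create.go changes above route names through writeName, so a non-legacy name like the one in this sketch would be emitted quoted inside the braces (for example {"http.requests.total"} ...), as the updated doc comments describe.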
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea4612e..062a2818563 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e39571..134767d69ac 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee151445a0..80df79c3193 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a0e4..9d8af6db742 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. 
+ // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) 
} return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4f97..fa761b35295 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a13f8..7e75c286b5b 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. 
func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075db0..46307f5721e 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. 
VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go index 054d058c850..cb192d59a80 100644 --- a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go @@ -2,17 +2,24 @@ package apply import ( "context" + "errors" + "fmt" "github.com/rancher/wrangler/pkg/apply/injectors" "github.com/rancher/wrangler/pkg/kv" "github.com/rancher/wrangler/pkg/merr" "github.com/rancher/wrangler/pkg/objectset" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" ) +// Indexer name added for cached types +const byHash = "wrangler.byObjectSetHash" + type patchKey struct { schema.GroupVersionKind objectset.ObjectKey @@ -160,13 +167,40 @@ func (o desiredSet) WithCacheTypes(igs ...InformerGetter) Apply { } for _, ig := range igs { - pruneTypes[ig.GroupVersionKind()] = ig.Informer() + informer := ig.Informer() + if err := addIndexerByHash(informer.GetIndexer()); err != nil { + // Ignore repeatedly adding the same indexer for different types + if !errors.Is(err, indexerAlreadyExistsErr) { + logrus.Warnf("Problem adding hash indexer to informer [%s]: %v", ig.GroupVersionKind().Kind, err) + } + } + pruneTypes[ig.GroupVersionKind()] = informer } o.pruneTypes = pruneTypes return o } +// addIndexerByHash an Informer to index objects by the hash annotation value +func addIndexerByHash(indexer cache.Indexer) error { + if _, alreadyAdded := indexer.GetIndexers()[byHash]; alreadyAdded { + return fmt.Errorf("adding indexer %q: %w", byHash, indexerAlreadyExistsErr) + } + return indexer.AddIndexers(map[string]cache.IndexFunc{ + byHash: func(obj interface{}) ([]string, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + labels := metadata.GetLabels() + if labels == nil || labels[LabelHash] == "" { + return nil, nil + } + return []string{labels[LabelHash]}, nil + }, + }) +} + func (o desiredSet) WithPatcher(gvk schema.GroupVersionKind, patcher Patcher) Apply { patchers := map[schema.GroupVersionKind]Patcher{} for k, v := range o.patchers { diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go index cf4ee23280e..cb68e4e8ea3 100644 --- a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go @@ -40,8 +40,9 @@ var ( LabelName, LabelNamespace, } - rls = map[string]flowcontrol.RateLimiter{} - rlsLock sync.Mutex + rls = map[string]flowcontrol.RateLimiter{} + rlsLock 
sync.Mutex + indexerAlreadyExistsErr = errors.New("an indexer with the same already exists") ) func (o *desiredSet) getRateLimit(labelHash string) flowcontrol.RateLimiter { diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go index e24c8d59a9b..84710113bc4 100644 --- a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go @@ -338,8 +338,7 @@ func (o *desiredSet) process(debugID string, set labels.Selector, gvk schema.Gro } } -func (o *desiredSet) list(namespaced bool, informer cache.SharedIndexInformer, client dynamic.NamespaceableResourceInterface, - selector labels.Selector, desiredObjects objectset.ObjectByKey) (map[objectset.ObjectKey]runtime.Object, error) { +func (o *desiredSet) list(namespaced bool, informer cache.SharedIndexInformer, client dynamic.NamespaceableResourceInterface, selector labels.Selector, desiredObjects objectset.ObjectByKey) (map[objectset.ObjectKey]runtime.Object, error) { var ( errs []error objs = objectset.ObjectByKey{} @@ -388,12 +387,17 @@ func (o *desiredSet) list(namespaced bool, informer cache.SharedIndexInformer, c namespace = o.listerNamespace } - err := cache.ListAllByNamespace(informer.GetIndexer(), namespace, selector, func(obj interface{}) { + // Special case for listing only by hash using indexers + indexer := informer.GetIndexer() + if hash, ok := getIndexableHash(indexer, selector); ok { + return listByHash(indexer, hash, namespace) + } + + if err := cache.ListAllByNamespace(indexer, namespace, selector, func(obj interface{}) { if err := addObjectToMap(objs, obj); err != nil { errs = append(errs, err) } - }) - if err != nil { + }); err != nil { errs = append(errs, err) } @@ -494,3 +498,45 @@ func multiNamespaceList(ctx context.Context, namespaces []string, baseClient dyn return wg.Wait() } + +// getIndexableHash detects if provided selector can be replaced by using the hash index, if configured, in which case returns the hash value +func getIndexableHash(indexer cache.Indexer, selector labels.Selector) (string, bool) { + // Check if indexer was added + if indexer == nil || indexer.GetIndexers()[byHash] == nil { + return "", false + } + + // Check specific case of listing with exact hash label selector + if req, selectable := selector.Requirements(); len(req) != 1 || !selectable { + return "", false + } + + return selector.RequiresExactMatch(LabelHash) +} + +// inNamespace checks whether a given object is a Kubernetes object and is part of the provided namespace +func inNamespace(namespace string, obj interface{}) bool { + metadata, err := meta.Accessor(obj) + return err == nil && metadata.GetNamespace() == namespace +} + +// listByHash use a pre-configured indexer to list objects of a certain type by their hash label +func listByHash(indexer cache.Indexer, hash string, namespace string) (map[objectset.ObjectKey]runtime.Object, error) { + var ( + errs []error + objs = objectset.ObjectByKey{} + ) + res, err := indexer.ByIndex(byHash, hash) + if err != nil { + return nil, err + } + for _, obj := range res { + if namespace != "" && !inNamespace(namespace, obj) { + continue + } + if err := addObjectToMap(objs, obj); err != nil { + errs = append(errs, err) + } + } + return objs, merr.NewErrors(errs...) 
+} diff --git a/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/type_go.go b/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/type_go.go index e6665daf7bc..c8033ac7aa5 100644 --- a/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/type_go.go +++ b/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/type_go.go @@ -313,10 +313,14 @@ func (c *{{.lowerName}}Cache) GetByIndex(indexName, key string) (result []*{{.ve } {{ if .hasStatus -}} +// {{.type}}StatusHandler is executed for every added or modified {{.type}}. Should return the new status to be updated type {{.type}}StatusHandler func(obj *{{.version}}.{{.type}}, status {{.version}}.{{.statusType}}) ({{.version}}.{{.statusType}}, error) +// {{.type}}GeneratingHandler is the top-level handler that is executed for every {{.type}} event. It extends {{.type}}StatusHandler by a returning a slice of child objects to be passed to apply.Apply type {{.type}}GeneratingHandler func(obj *{{.version}}.{{.type}}, status {{.version}}.{{.statusType}}) ([]runtime.Object, {{.version}}.{{.statusType}}, error) +// Register{{.type}}StatusHandler configures a {{.type}}Controller to execute a {{.type}}StatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func Register{{.type}}StatusHandler(ctx context.Context, controller {{.type}}Controller, condition condition.Cond, name string, handler {{.type}}StatusHandler) { statusHandler := &{{.lowerName}}StatusHandler{ client: controller, @@ -326,6 +330,8 @@ func Register{{.type}}StatusHandler(ctx context.Context, controller {{.type}}Con controller.AddGenericHandler(ctx, name, From{{.type}}HandlerToHandler(statusHandler.sync)) } +// Register{{.type}}GeneratingHandler configures a {{.type}}Controller to execute a {{.type}}GeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func Register{{.type}}GeneratingHandler(ctx context.Context, controller {{.type}}Controller, apply apply.Apply, condition condition.Cond, name string, handler {{.type}}GeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &{{.lowerName}}GeneratingHandler{ @@ -347,6 +353,7 @@ type {{.lowerName}}StatusHandler struct { handler {{.type}}StatusHandler } +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API func (a *{{.lowerName}}StatusHandler) sync(key string, obj *{{.version}}.{{.type}}) (*{{.version}}.{{.type}}, error) { if obj == nil { return obj, nil @@ -388,12 +395,14 @@ func (a *{{.lowerName}}StatusHandler) sync(key string, obj *{{.version}}.{{.type type {{.lowerName}}GeneratingHandler struct { {{.type}}GeneratingHandler - apply apply.Apply - opts generic.GeneratingHandlerOptions - gvk schema.GroupVersionKind - name string + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *{{.lowerName}}GeneratingHandler) Remove(key string, obj *{{.version}}.{{.type}}) (*{{.version}}.{{.type}}, error) { if obj != nil { return obj, nil @@ -403,12 +412,17 @@ func (a *{{.lowerName}}GeneratingHandler) Remove(key string, obj *{{.version}}.{ obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured {{.type}}GeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *{{.lowerName}}GeneratingHandler) Handle(obj *{{.version}}.{{.type}}, status {{.version}}.{{.statusType}}) ({{.version}}.{{.statusType}}, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -418,11 +432,43 @@ func (a *{{.lowerName}}GeneratingHandler) Handle(obj *{{.version}}.{{.type}}, st if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *{{.lowerName}}GeneratingHandler) isNewResourceVersion(obj *{{.version}}.{{.type}}) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *{{.lowerName}}GeneratingHandler) storeResourceVersion(obj *{{.version}}.{{.type}}) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } {{- end }} ` diff --git a/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/util.go b/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/util.go index 5458b07931f..06b6df3dc70 100644 --- a/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/util.go +++ b/vendor/github.com/rancher/wrangler/pkg/controller-gen/generators/util.go @@ -11,6 +11,7 @@ import ( var ( Imports = []string{ "context", + "sync", "time", "k8s.io/client-go/rest", "github.com/rancher/lasso/pkg/client", diff --git a/vendor/github.com/rancher/wrangler/pkg/controller-gen/main.go b/vendor/github.com/rancher/wrangler/pkg/controller-gen/main.go index beea92a0422..731d7ea3450 100644 --- a/vendor/github.com/rancher/wrangler/pkg/controller-gen/main.go +++ b/vendor/github.com/rancher/wrangler/pkg/controller-gen/main.go @@ -28,10 +28,6 @@ import ( "k8s.io/gengo/types" ) -var ( - t = true -) - func Run(opts cgargs.Options) { customArgs := &cgargs.CustomArgs{ Options: opts, diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiextensions.k8s.io/v1/customresourcedefinition.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiextensions.k8s.io/v1/customresourcedefinition.go index be7526f8ae0..1d57b7c1b30 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiextensions.k8s.io/v1/customresourcedefinition.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiextensions.k8s.io/v1/customresourcedefinition.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *customResourceDefinitionCache) GetByIndex(indexName, key string) (resul return result, nil } +// CustomResourceDefinitionStatusHandler is executed for every added or modified CustomResourceDefinition. Should return the new status to be updated type CustomResourceDefinitionStatusHandler func(obj *v1.CustomResourceDefinition, status v1.CustomResourceDefinitionStatus) (v1.CustomResourceDefinitionStatus, error) +// CustomResourceDefinitionGeneratingHandler is the top-level handler that is executed for every CustomResourceDefinition event. It extends CustomResourceDefinitionStatusHandler by a returning a slice of child objects to be passed to apply.Apply type CustomResourceDefinitionGeneratingHandler func(obj *v1.CustomResourceDefinition, status v1.CustomResourceDefinitionStatus) ([]runtime.Object, v1.CustomResourceDefinitionStatus, error) +// RegisterCustomResourceDefinitionStatusHandler configures a CustomResourceDefinitionController to execute a CustomResourceDefinitionStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterCustomResourceDefinitionStatusHandler(ctx context.Context, controller CustomResourceDefinitionController, condition condition.Cond, name string, handler CustomResourceDefinitionStatusHandler) { statusHandler := &customResourceDefinitionStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterCustomResourceDefinitionStatusHandler(ctx context.Context, controll controller.AddGenericHandler(ctx, name, FromCustomResourceDefinitionHandlerToHandler(statusHandler.sync)) } +// RegisterCustomResourceDefinitionGeneratingHandler configures a CustomResourceDefinitionController to execute a CustomResourceDefinitionGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterCustomResourceDefinitionGeneratingHandler(ctx context.Context, controller CustomResourceDefinitionController, apply apply.Apply, condition condition.Cond, name string, handler CustomResourceDefinitionGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &customResourceDefinitionGeneratingHandler{ @@ -297,6 +304,7 @@ type customResourceDefinitionStatusHandler struct { handler CustomResourceDefinitionStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *customResourceDefinitionStatusHandler) sync(key string, obj *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type customResourceDefinitionGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *customResourceDefinitionGeneratingHandler) Remove(key string, obj *v1.CustomResourceDefinition) (*v1.CustomResourceDefinition, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *customResourceDefinitionGeneratingHandler) Remove(key string, obj *v1.C obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured CustomResourceDefinitionGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *customResourceDefinitionGeneratingHandler) Handle(obj *v1.CustomResourceDefinition, status v1.CustomResourceDefinitionStatus) (v1.CustomResourceDefinitionStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *customResourceDefinitionGeneratingHandler) Handle(obj *v1.CustomResourc if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) 
+ if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *customResourceDefinitionGeneratingHandler) isNewResourceVersion(obj *v1.CustomResourceDefinition) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *customResourceDefinitionGeneratingHandler) storeResourceVersion(obj *v1.CustomResourceDefinition) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiregistration.k8s.io/v1/apiservice.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiregistration.k8s.io/v1/apiservice.go index 35dbaa1bfca..eda87f4872a 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiregistration.k8s.io/v1/apiservice.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apiregistration.k8s.io/v1/apiservice.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *aPIServiceCache) GetByIndex(indexName, key string) (result []*v1.APISer return result, nil } +// APIServiceStatusHandler is executed for every added or modified APIService. Should return the new status to be updated type APIServiceStatusHandler func(obj *v1.APIService, status v1.APIServiceStatus) (v1.APIServiceStatus, error) +// APIServiceGeneratingHandler is the top-level handler that is executed for every APIService event. It extends APIServiceStatusHandler by a returning a slice of child objects to be passed to apply.Apply type APIServiceGeneratingHandler func(obj *v1.APIService, status v1.APIServiceStatus) ([]runtime.Object, v1.APIServiceStatus, error) +// RegisterAPIServiceStatusHandler configures a APIServiceController to execute a APIServiceStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterAPIServiceStatusHandler(ctx context.Context, controller APIServiceController, condition condition.Cond, name string, handler APIServiceStatusHandler) { statusHandler := &aPIServiceStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterAPIServiceStatusHandler(ctx context.Context, controller APIServiceC controller.AddGenericHandler(ctx, name, FromAPIServiceHandlerToHandler(statusHandler.sync)) } +// RegisterAPIServiceGeneratingHandler configures a APIServiceController to execute a APIServiceGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterAPIServiceGeneratingHandler(ctx context.Context, controller APIServiceController, apply apply.Apply, condition condition.Cond, name string, handler APIServiceGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &aPIServiceGeneratingHandler{ @@ -297,6 +304,7 @@ type aPIServiceStatusHandler struct { handler APIServiceStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *aPIServiceStatusHandler) sync(key string, obj *v1.APIService) (*v1.APIService, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type aPIServiceGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *aPIServiceGeneratingHandler) Remove(key string, obj *v1.APIService) (*v1.APIService, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *aPIServiceGeneratingHandler) Remove(key string, obj *v1.APIService) (*v obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured APIServiceGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *aPIServiceGeneratingHandler) Handle(obj *v1.APIService, status v1.APIServiceStatus) (v1.APIServiceStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *aPIServiceGeneratingHandler) Handle(obj *v1.APIService, status v1.APISe if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *aPIServiceGeneratingHandler) isNewResourceVersion(obj *v1.APIService) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *aPIServiceGeneratingHandler) storeResourceVersion(obj *v1.APIService) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/daemonset.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/daemonset.go index 039d680dd95..2eb133d41ab 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/daemonset.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/daemonset.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *daemonSetCache) GetByIndex(indexName, key string) (result []*v1.DaemonS return result, nil } +// DaemonSetStatusHandler is executed for every added or modified DaemonSet. Should return the new status to be updated type DaemonSetStatusHandler func(obj *v1.DaemonSet, status v1.DaemonSetStatus) (v1.DaemonSetStatus, error) +// DaemonSetGeneratingHandler is the top-level handler that is executed for every DaemonSet event. It extends DaemonSetStatusHandler by a returning a slice of child objects to be passed to apply.Apply type DaemonSetGeneratingHandler func(obj *v1.DaemonSet, status v1.DaemonSetStatus) ([]runtime.Object, v1.DaemonSetStatus, error) +// RegisterDaemonSetStatusHandler configures a DaemonSetController to execute a DaemonSetStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterDaemonSetStatusHandler(ctx context.Context, controller DaemonSetController, condition condition.Cond, name string, handler DaemonSetStatusHandler) { statusHandler := &daemonSetStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterDaemonSetStatusHandler(ctx context.Context, controller DaemonSetCon controller.AddGenericHandler(ctx, name, FromDaemonSetHandlerToHandler(statusHandler.sync)) } +// RegisterDaemonSetGeneratingHandler configures a DaemonSetController to execute a DaemonSetGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterDaemonSetGeneratingHandler(ctx context.Context, controller DaemonSetController, apply apply.Apply, condition condition.Cond, name string, handler DaemonSetGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &daemonSetGeneratingHandler{ @@ -297,6 +304,7 @@ type daemonSetStatusHandler struct { handler DaemonSetStatusHandler } +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API func (a *daemonSetStatusHandler) sync(key string, obj *v1.DaemonSet) (*v1.DaemonSet, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type daemonSetGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *daemonSetGeneratingHandler) Remove(key string, obj *v1.DaemonSet) (*v1.DaemonSet, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *daemonSetGeneratingHandler) Remove(key string, obj *v1.DaemonSet) (*v1. obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured DaemonSetGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *daemonSetGeneratingHandler) Handle(obj *v1.DaemonSet, status v1.DaemonSetStatus) (v1.DaemonSetStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *daemonSetGeneratingHandler) Handle(obj *v1.DaemonSet, status v1.DaemonS if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *daemonSetGeneratingHandler) isNewResourceVersion(obj *v1.DaemonSet) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *daemonSetGeneratingHandler) storeResourceVersion(obj *v1.DaemonSet) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/deployment.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/deployment.go index 2adb321e991..c067aa5ba64 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/deployment.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/deployment.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *deploymentCache) GetByIndex(indexName, key string) (result []*v1.Deploy return result, nil } +// DeploymentStatusHandler is executed for every added or modified Deployment. 
Should return the new status to be updated type DeploymentStatusHandler func(obj *v1.Deployment, status v1.DeploymentStatus) (v1.DeploymentStatus, error) +// DeploymentGeneratingHandler is the top-level handler that is executed for every Deployment event. It extends DeploymentStatusHandler by a returning a slice of child objects to be passed to apply.Apply type DeploymentGeneratingHandler func(obj *v1.Deployment, status v1.DeploymentStatus) ([]runtime.Object, v1.DeploymentStatus, error) +// RegisterDeploymentStatusHandler configures a DeploymentController to execute a DeploymentStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterDeploymentStatusHandler(ctx context.Context, controller DeploymentController, condition condition.Cond, name string, handler DeploymentStatusHandler) { statusHandler := &deploymentStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterDeploymentStatusHandler(ctx context.Context, controller DeploymentC controller.AddGenericHandler(ctx, name, FromDeploymentHandlerToHandler(statusHandler.sync)) } +// RegisterDeploymentGeneratingHandler configures a DeploymentController to execute a DeploymentGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterDeploymentGeneratingHandler(ctx context.Context, controller DeploymentController, apply apply.Apply, condition condition.Cond, name string, handler DeploymentGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &deploymentGeneratingHandler{ @@ -297,6 +304,7 @@ type deploymentStatusHandler struct { handler DeploymentStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *deploymentStatusHandler) sync(key string, obj *v1.Deployment) (*v1.Deployment, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type deploymentGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *deploymentGeneratingHandler) Remove(key string, obj *v1.Deployment) (*v1.Deployment, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *deploymentGeneratingHandler) Remove(key string, obj *v1.Deployment) (*v obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured DeploymentGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *deploymentGeneratingHandler) Handle(obj *v1.Deployment, status v1.DeploymentStatus) (v1.DeploymentStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *deploymentGeneratingHandler) Handle(obj *v1.Deployment, status v1.Deplo if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 
+ err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *deploymentGeneratingHandler) isNewResourceVersion(obj *v1.Deployment) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *deploymentGeneratingHandler) storeResourceVersion(obj *v1.Deployment) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/statefulset.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/statefulset.go index a573f7c541a..04a055aa1c2 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/statefulset.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/apps/v1/statefulset.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *statefulSetCache) GetByIndex(indexName, key string) (result []*v1.State return result, nil } +// StatefulSetStatusHandler is executed for every added or modified StatefulSet. Should return the new status to be updated type StatefulSetStatusHandler func(obj *v1.StatefulSet, status v1.StatefulSetStatus) (v1.StatefulSetStatus, error) +// StatefulSetGeneratingHandler is the top-level handler that is executed for every StatefulSet event. It extends StatefulSetStatusHandler by a returning a slice of child objects to be passed to apply.Apply type StatefulSetGeneratingHandler func(obj *v1.StatefulSet, status v1.StatefulSetStatus) ([]runtime.Object, v1.StatefulSetStatus, error) +// RegisterStatefulSetStatusHandler configures a StatefulSetController to execute a StatefulSetStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterStatefulSetStatusHandler(ctx context.Context, controller StatefulSetController, condition condition.Cond, name string, handler StatefulSetStatusHandler) { statusHandler := &statefulSetStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterStatefulSetStatusHandler(ctx context.Context, controller StatefulSe controller.AddGenericHandler(ctx, name, FromStatefulSetHandlerToHandler(statusHandler.sync)) } +// RegisterStatefulSetGeneratingHandler configures a StatefulSetController to execute a StatefulSetGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterStatefulSetGeneratingHandler(ctx context.Context, controller StatefulSetController, apply apply.Apply, condition condition.Cond, name string, handler StatefulSetGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &statefulSetGeneratingHandler{ @@ -297,6 +304,7 @@ type statefulSetStatusHandler struct { handler StatefulSetStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *statefulSetStatusHandler) sync(key string, obj *v1.StatefulSet) (*v1.StatefulSet, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type statefulSetGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *statefulSetGeneratingHandler) Remove(key string, obj *v1.StatefulSet) (*v1.StatefulSet, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *statefulSetGeneratingHandler) Remove(key string, obj *v1.StatefulSet) ( obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured StatefulSetGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *statefulSetGeneratingHandler) Handle(obj *v1.StatefulSet, status v1.StatefulSetStatus) (v1.StatefulSetStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *statefulSetGeneratingHandler) Handle(obj *v1.StatefulSet, status v1.Sta if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *statefulSetGeneratingHandler) isNewResourceVersion(obj *v1.StatefulSet) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *statefulSetGeneratingHandler) storeResourceVersion(obj *v1.StatefulSet) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/batch/v1/job.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/batch/v1/job.go index ca5b9355f78..c97e79d48f2 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/batch/v1/job.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/batch/v1/job.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *jobCache) GetByIndex(indexName, key string) (result []*v1.Job, err erro return result, nil } +// JobStatusHandler is executed for every added or modified Job. Should return the new status to be updated type JobStatusHandler func(obj *v1.Job, status v1.JobStatus) (v1.JobStatus, error) +// JobGeneratingHandler is the top-level handler that is executed for every Job event. It extends JobStatusHandler by a returning a slice of child objects to be passed to apply.Apply type JobGeneratingHandler func(obj *v1.Job, status v1.JobStatus) ([]runtime.Object, v1.JobStatus, error) +// RegisterJobStatusHandler configures a JobController to execute a JobStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterJobStatusHandler(ctx context.Context, controller JobController, condition condition.Cond, name string, handler JobStatusHandler) { statusHandler := &jobStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterJobStatusHandler(ctx context.Context, controller JobController, con controller.AddGenericHandler(ctx, name, FromJobHandlerToHandler(statusHandler.sync)) } +// RegisterJobGeneratingHandler configures a JobController to execute a JobGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterJobGeneratingHandler(ctx context.Context, controller JobController, apply apply.Apply, condition condition.Cond, name string, handler JobGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &jobGeneratingHandler{ @@ -297,6 +304,7 @@ type jobStatusHandler struct { handler JobStatusHandler } +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API func (a *jobStatusHandler) sync(key string, obj *v1.Job) (*v1.Job, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type jobGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *jobGeneratingHandler) Remove(key string, obj *v1.Job) (*v1.Job, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *jobGeneratingHandler) Remove(key string, obj *v1.Job) (*v1.Job, error) obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured JobGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *jobGeneratingHandler) Handle(obj *v1.Job, status v1.JobStatus) (v1.JobStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *jobGeneratingHandler) Handle(obj *v1.Job, status v1.JobStatus) (v1.JobS if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *jobGeneratingHandler) isNewResourceVersion(obj *v1.Job) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *jobGeneratingHandler) storeResourceVersion(obj *v1.Job) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/namespace.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/namespace.go index 83d04ac6b90..3f97d675572 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/namespace.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/namespace.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *namespaceCache) GetByIndex(indexName, key string) (result []*v1.Namespa return result, nil } +// NamespaceStatusHandler is executed for every added or modified Namespace. 
Should return the new status to be updated type NamespaceStatusHandler func(obj *v1.Namespace, status v1.NamespaceStatus) (v1.NamespaceStatus, error) +// NamespaceGeneratingHandler is the top-level handler that is executed for every Namespace event. It extends NamespaceStatusHandler by a returning a slice of child objects to be passed to apply.Apply type NamespaceGeneratingHandler func(obj *v1.Namespace, status v1.NamespaceStatus) ([]runtime.Object, v1.NamespaceStatus, error) +// RegisterNamespaceStatusHandler configures a NamespaceController to execute a NamespaceStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterNamespaceStatusHandler(ctx context.Context, controller NamespaceController, condition condition.Cond, name string, handler NamespaceStatusHandler) { statusHandler := &namespaceStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterNamespaceStatusHandler(ctx context.Context, controller NamespaceCon controller.AddGenericHandler(ctx, name, FromNamespaceHandlerToHandler(statusHandler.sync)) } +// RegisterNamespaceGeneratingHandler configures a NamespaceController to execute a NamespaceGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterNamespaceGeneratingHandler(ctx context.Context, controller NamespaceController, apply apply.Apply, condition condition.Cond, name string, handler NamespaceGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &namespaceGeneratingHandler{ @@ -297,6 +304,7 @@ type namespaceStatusHandler struct { handler NamespaceStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *namespaceStatusHandler) sync(key string, obj *v1.Namespace) (*v1.Namespace, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type namespaceGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *namespaceGeneratingHandler) Remove(key string, obj *v1.Namespace) (*v1.Namespace, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *namespaceGeneratingHandler) Remove(key string, obj *v1.Namespace) (*v1. obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured NamespaceGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *namespaceGeneratingHandler) Handle(obj *v1.Namespace, status v1.NamespaceStatus) (v1.NamespaceStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *namespaceGeneratingHandler) Handle(obj *v1.Namespace, status v1.Namespa if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 
WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *namespaceGeneratingHandler) isNewResourceVersion(obj *v1.Namespace) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *namespaceGeneratingHandler) storeResourceVersion(obj *v1.Namespace) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/node.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/node.go index b444b33c3c2..1bd956bfe8e 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/node.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/node.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *nodeCache) GetByIndex(indexName, key string) (result []*v1.Node, err er return result, nil } +// NodeStatusHandler is executed for every added or modified Node. Should return the new status to be updated type NodeStatusHandler func(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) +// NodeGeneratingHandler is the top-level handler that is executed for every Node event. It extends NodeStatusHandler by a returning a slice of child objects to be passed to apply.Apply type NodeGeneratingHandler func(obj *v1.Node, status v1.NodeStatus) ([]runtime.Object, v1.NodeStatus, error) +// RegisterNodeStatusHandler configures a NodeController to execute a NodeStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, condition condition.Cond, name string, handler NodeStatusHandler) { statusHandler := &nodeStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, c controller.AddGenericHandler(ctx, name, FromNodeHandlerToHandler(statusHandler.sync)) } +// RegisterNodeGeneratingHandler configures a NodeController to execute a NodeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterNodeGeneratingHandler(ctx context.Context, controller NodeController, apply apply.Apply, condition condition.Cond, name string, handler NodeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &nodeGeneratingHandler{ @@ -297,6 +304,7 @@ type nodeStatusHandler struct { handler NodeStatusHandler } +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API func (a *nodeStatusHandler) sync(key string, obj *v1.Node) (*v1.Node, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type nodeGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *nodeGeneratingHandler) Remove(key string, obj *v1.Node) (*v1.Node, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *nodeGeneratingHandler) Remove(key string, obj *v1.Node) (*v1.Node, erro obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured NodeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *nodeGeneratingHandler) Handle(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *nodeGeneratingHandler) Handle(obj *v1.Node, status v1.NodeStatus) (v1.N if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *nodeGeneratingHandler) isNewResourceVersion(obj *v1.Node) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *nodeGeneratingHandler) storeResourceVersion(obj *v1.Node) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolume.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolume.go index 576c5607f05..097e450e131 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolume.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolume.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *persistentVolumeCache) GetByIndex(indexName, key string) (result []*v1. return result, nil } +// PersistentVolumeStatusHandler is executed for every added or modified PersistentVolume. 
Should return the new status to be updated type PersistentVolumeStatusHandler func(obj *v1.PersistentVolume, status v1.PersistentVolumeStatus) (v1.PersistentVolumeStatus, error) +// PersistentVolumeGeneratingHandler is the top-level handler that is executed for every PersistentVolume event. It extends PersistentVolumeStatusHandler by a returning a slice of child objects to be passed to apply.Apply type PersistentVolumeGeneratingHandler func(obj *v1.PersistentVolume, status v1.PersistentVolumeStatus) ([]runtime.Object, v1.PersistentVolumeStatus, error) +// RegisterPersistentVolumeStatusHandler configures a PersistentVolumeController to execute a PersistentVolumeStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPersistentVolumeStatusHandler(ctx context.Context, controller PersistentVolumeController, condition condition.Cond, name string, handler PersistentVolumeStatusHandler) { statusHandler := &persistentVolumeStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterPersistentVolumeStatusHandler(ctx context.Context, controller Persi controller.AddGenericHandler(ctx, name, FromPersistentVolumeHandlerToHandler(statusHandler.sync)) } +// RegisterPersistentVolumeGeneratingHandler configures a PersistentVolumeController to execute a PersistentVolumeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPersistentVolumeGeneratingHandler(ctx context.Context, controller PersistentVolumeController, apply apply.Apply, condition condition.Cond, name string, handler PersistentVolumeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &persistentVolumeGeneratingHandler{ @@ -297,6 +304,7 @@ type persistentVolumeStatusHandler struct { handler PersistentVolumeStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *persistentVolumeStatusHandler) sync(key string, obj *v1.PersistentVolume) (*v1.PersistentVolume, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type persistentVolumeGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *persistentVolumeGeneratingHandler) Remove(key string, obj *v1.PersistentVolume) (*v1.PersistentVolume, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *persistentVolumeGeneratingHandler) Remove(key string, obj *v1.Persisten obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). 
ApplyObjects() } +// Handle executes the configured PersistentVolumeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *persistentVolumeGeneratingHandler) Handle(obj *v1.PersistentVolume, status v1.PersistentVolumeStatus) (v1.PersistentVolumeStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *persistentVolumeGeneratingHandler) Handle(obj *v1.PersistentVolume, sta if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *persistentVolumeGeneratingHandler) isNewResourceVersion(obj *v1.PersistentVolume) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *persistentVolumeGeneratingHandler) storeResourceVersion(obj *v1.PersistentVolume) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolumeclaim.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolumeclaim.go index 5fbce6d8857..8b23338f88e 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolumeclaim.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/persistentvolumeclaim.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *persistentVolumeClaimCache) GetByIndex(indexName, key string) (result [ return result, nil } +// PersistentVolumeClaimStatusHandler is executed for every added or modified PersistentVolumeClaim. Should return the new status to be updated type PersistentVolumeClaimStatusHandler func(obj *v1.PersistentVolumeClaim, status v1.PersistentVolumeClaimStatus) (v1.PersistentVolumeClaimStatus, error) +// PersistentVolumeClaimGeneratingHandler is the top-level handler that is executed for every PersistentVolumeClaim event. It extends PersistentVolumeClaimStatusHandler by a returning a slice of child objects to be passed to apply.Apply type PersistentVolumeClaimGeneratingHandler func(obj *v1.PersistentVolumeClaim, status v1.PersistentVolumeClaimStatus) ([]runtime.Object, v1.PersistentVolumeClaimStatus, error) +// RegisterPersistentVolumeClaimStatusHandler configures a PersistentVolumeClaimController to execute a PersistentVolumeClaimStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPersistentVolumeClaimStatusHandler(ctx context.Context, controller PersistentVolumeClaimController, condition condition.Cond, name string, handler PersistentVolumeClaimStatusHandler) { statusHandler := &persistentVolumeClaimStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterPersistentVolumeClaimStatusHandler(ctx context.Context, controller controller.AddGenericHandler(ctx, name, FromPersistentVolumeClaimHandlerToHandler(statusHandler.sync)) } +// RegisterPersistentVolumeClaimGeneratingHandler configures a PersistentVolumeClaimController to execute a PersistentVolumeClaimGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPersistentVolumeClaimGeneratingHandler(ctx context.Context, controller PersistentVolumeClaimController, apply apply.Apply, condition condition.Cond, name string, handler PersistentVolumeClaimGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &persistentVolumeClaimGeneratingHandler{ @@ -297,6 +304,7 @@ type persistentVolumeClaimStatusHandler struct { handler PersistentVolumeClaimStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *persistentVolumeClaimStatusHandler) sync(key string, obj *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type persistentVolumeClaimGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *persistentVolumeClaimGeneratingHandler) Remove(key string, obj *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *persistentVolumeClaimGeneratingHandler) Remove(key string, obj *v1.Pers obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured PersistentVolumeClaimGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *persistentVolumeClaimGeneratingHandler) Handle(obj *v1.PersistentVolumeClaim, status v1.PersistentVolumeClaimStatus) (v1.PersistentVolumeClaimStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *persistentVolumeClaimGeneratingHandler) Handle(obj *v1.PersistentVolume if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *persistentVolumeClaimGeneratingHandler) isNewResourceVersion(obj *v1.PersistentVolumeClaim) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *persistentVolumeClaimGeneratingHandler) storeResourceVersion(obj *v1.PersistentVolumeClaim) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/pod.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/pod.go index 4286bcf56e7..b52fe1c4046 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/pod.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/pod.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *podCache) GetByIndex(indexName, key string) (result []*v1.Pod, err erro return result, nil } +// PodStatusHandler is executed for every added or modified Pod. Should return the new status to be updated type PodStatusHandler func(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error) +// PodGeneratingHandler is the top-level handler that is executed for every Pod event. It extends PodStatusHandler by a returning a slice of child objects to be passed to apply.Apply type PodGeneratingHandler func(obj *v1.Pod, status v1.PodStatus) ([]runtime.Object, v1.PodStatus, error) +// RegisterPodStatusHandler configures a PodController to execute a PodStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPodStatusHandler(ctx context.Context, controller PodController, condition condition.Cond, name string, handler PodStatusHandler) { statusHandler := &podStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterPodStatusHandler(ctx context.Context, controller PodController, con controller.AddGenericHandler(ctx, name, FromPodHandlerToHandler(statusHandler.sync)) } +// RegisterPodGeneratingHandler configures a PodController to execute a PodGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterPodGeneratingHandler(ctx context.Context, controller PodController, apply apply.Apply, condition condition.Cond, name string, handler PodGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &podGeneratingHandler{ @@ -297,6 +304,7 @@ type podStatusHandler struct { handler PodStatusHandler } +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API func (a *podStatusHandler) sync(key string, obj *v1.Pod) (*v1.Pod, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type podGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *podGeneratingHandler) Remove(key string, obj *v1.Pod) (*v1.Pod, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *podGeneratingHandler) Remove(key string, obj *v1.Pod) (*v1.Pod, error) obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured PodGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *podGeneratingHandler) Handle(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *podGeneratingHandler) Handle(obj *v1.Pod, status v1.PodStatus) (v1.PodS if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *podGeneratingHandler) isNewResourceVersion(obj *v1.Pod) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *podGeneratingHandler) storeResourceVersion(obj *v1.Pod) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/service.go b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/service.go index 4802feb6bad..a347887c654 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/service.go +++ b/vendor/github.com/rancher/wrangler/pkg/generated/controllers/core/v1/service.go @@ -20,6 +20,7 @@ package v1 import ( "context" + "sync" "time" "github.com/rancher/lasso/pkg/client" @@ -263,10 +264,14 @@ func (c *serviceCache) GetByIndex(indexName, key string) (result []*v1.Service, return result, nil } +// ServiceStatusHandler is executed for every added or modified Service. 
Should return the new status to be updated type ServiceStatusHandler func(obj *v1.Service, status v1.ServiceStatus) (v1.ServiceStatus, error) +// ServiceGeneratingHandler is the top-level handler that is executed for every Service event. It extends ServiceStatusHandler by a returning a slice of child objects to be passed to apply.Apply type ServiceGeneratingHandler func(obj *v1.Service, status v1.ServiceStatus) ([]runtime.Object, v1.ServiceStatus, error) +// RegisterServiceStatusHandler configures a ServiceController to execute a ServiceStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterServiceStatusHandler(ctx context.Context, controller ServiceController, condition condition.Cond, name string, handler ServiceStatusHandler) { statusHandler := &serviceStatusHandler{ client: controller, @@ -276,6 +281,8 @@ func RegisterServiceStatusHandler(ctx context.Context, controller ServiceControl controller.AddGenericHandler(ctx, name, FromServiceHandlerToHandler(statusHandler.sync)) } +// RegisterServiceGeneratingHandler configures a ServiceController to execute a ServiceGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution func RegisterServiceGeneratingHandler(ctx context.Context, controller ServiceController, apply apply.Apply, condition condition.Cond, name string, handler ServiceGeneratingHandler, opts *generic.GeneratingHandlerOptions) { statusHandler := &serviceGeneratingHandler{ @@ -297,6 +304,7 @@ type serviceStatusHandler struct { handler ServiceStatusHandler } +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API func (a *serviceStatusHandler) sync(key string, obj *v1.Service) (*v1.Service, error) { if obj == nil { return obj, nil @@ -342,8 +350,10 @@ type serviceGeneratingHandler struct { opts generic.GeneratingHandlerOptions gvk schema.GroupVersionKind name string + seen sync.Map } +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied func (a *serviceGeneratingHandler) Remove(key string, obj *v1.Service) (*v1.Service, error) { if obj != nil { return obj, nil @@ -353,12 +363,17 @@ func (a *serviceGeneratingHandler) Remove(key string, obj *v1.Service) (*v1.Serv obj.Namespace, obj.Name = kv.RSplit(key, "/") obj.SetGroupVersionKind(a.gvk) + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects() } +// Handle executes the configured ServiceGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource func (a *serviceGeneratingHandler) Handle(obj *v1.Service, status v1.ServiceStatus) (v1.ServiceStatus, error) { if !obj.DeletionTimestamp.IsZero() { return status, nil @@ -368,9 +383,41 @@ func (a *serviceGeneratingHandler) Handle(obj *v1.Service, status v1.ServiceStat if err != nil { return newStatus, err } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } - return newStatus, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). WithOwner(obj). WithSetID(a.name). ApplyObjects(objs...) 
+ if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *serviceGeneratingHandler) isNewResourceVersion(obj *v1.Service) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *serviceGeneratingHandler) storeResourceVersion(obj *v1.Service) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) } diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/generating.go b/vendor/github.com/rancher/wrangler/pkg/generic/generating.go index 9ac9fae8631..3063e47f09a 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generic/generating.go +++ b/vendor/github.com/rancher/wrangler/pkg/generic/generating.go @@ -10,6 +10,8 @@ type GeneratingHandlerOptions struct { AllowClusterScoped bool NoOwnerReference bool DynamicLookup bool + // UniqueApplyForResourceVersion will skip calling apply if the resource version didn't change from the previous execution + UniqueApplyForResourceVersion bool } func ConfigureApplyForObject(apply apply.Apply, obj metav1.Object, opts *GeneratingHandlerOptions) apply.Apply { diff --git a/vendor/github.com/rancher/wrangler/pkg/schemas/mappers/enum.go b/vendor/github.com/rancher/wrangler/pkg/schemas/mappers/enum.go index d5e2b01ea13..d7c2f475d76 100644 --- a/vendor/github.com/rancher/wrangler/pkg/schemas/mappers/enum.go +++ b/vendor/github.com/rancher/wrangler/pkg/schemas/mappers/enum.go @@ -12,8 +12,7 @@ import ( type Enum struct { DefaultMapper - field string - vals map[string]string + vals map[string]string } func NewEnum(field string, vals ...string) schemas.Mapper { diff --git a/vendor/github.com/rancher/wrangler/pkg/signals/signal.go b/vendor/github.com/rancher/wrangler/pkg/signals/signal.go index ccbdabe7597..724b9e1981b 100644 --- a/vendor/github.com/rancher/wrangler/pkg/signals/signal.go +++ b/vendor/github.com/rancher/wrangler/pkg/signals/signal.go @@ -63,4 +63,4 @@ func RequestShutdown() bool { } return false -} \ No newline at end of file +} diff --git a/vendor/github.com/rancher/wrangler/pkg/signals/signal_posix.go b/vendor/github.com/rancher/wrangler/pkg/signals/signal_posix.go index 9bdb4e7418b..a0f00a73213 100644 --- a/vendor/github.com/rancher/wrangler/pkg/signals/signal_posix.go +++ b/vendor/github.com/rancher/wrangler/pkg/signals/signal_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/client/interface.go b/vendor/github.com/rancher/wrangler/pkg/summary/client/interface.go index 3b646c17d51..46243fcba66 100644 --- a/vendor/github.com/rancher/wrangler/pkg/summary/client/interface.go +++ b/vendor/github.com/rancher/wrangler/pkg/summary/client/interface.go @@ -2,6 +2,7 @@ package client import ( "context" + "github.com/rancher/wrangler/pkg/summary" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" diff --git 
a/vendor/github.com/rancher/wrangler/pkg/summary/client/simple.go b/vendor/github.com/rancher/wrangler/pkg/summary/client/simple.go index 5fd919c415c..0277d3549d2 100644 --- a/vendor/github.com/rancher/wrangler/pkg/summary/client/simple.go +++ b/vendor/github.com/rancher/wrangler/pkg/summary/client/simple.go @@ -2,6 +2,7 @@ package client import ( "context" + "github.com/rancher/wrangler/pkg/summary" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go index 4f93c7522fe..17e2b9c844e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_aix_nocgo.go @@ -12,7 +12,6 @@ import ( "golang.org/x/sys/unix" ) -var whiteSpaces = regexp.MustCompile(`\s+`) var startBlank = regexp.MustCompile(`^\s+`) var ignoreFSType = map[string]bool{"procfs": true} @@ -60,7 +59,7 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro if startBlank.MatchString(line) { line = "localhost" + line } - p := whiteSpaces.Split(lines[idx], 6) + p := strings.Fields(lines[idx]) if len(p) < 5 || ignoreFSType[p[colidx["vfs"]]] { continue } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go index 47687334037..36525f694b9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_fallback.go @@ -1,5 +1,5 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!aix +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !windows && !solaris && !aix +// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!windows,!solaris,!aix package disk diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go index 2a8c50ae48e..ada9f9e76c3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_linux.go @@ -5,11 +5,9 @@ package disk import ( "bufio" - "bytes" "context" "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -474,7 +472,11 @@ func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOC } d.Name = name - d.SerialNumber, _ = SerialNumberWithContext(ctx, name) + // Names passed in can be full paths (/dev/sda) or just device names (sda). + // Since `name` here is already a basename, re-add the /dev path. + // This is not ideal, but we may break the API by changing how SerialNumberWithContext + // works. 
+ d.SerialNumber, _ = SerialNumberWithContext(ctx, common.HostDevWithContext(ctx, name)) d.Label, _ = LabelWithContext(ctx, name) ret[name] = d @@ -482,32 +484,42 @@ func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOC return ret, nil } +func udevData(ctx context.Context, major uint32, minor uint32, name string) (string, error) { + udevDataPath := common.HostRunWithContext(ctx, fmt.Sprintf("udev/data/b%d:%d", major, minor)) + if f, err := os.Open(udevDataPath); err == nil { + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + values := strings.SplitN(scanner.Text(), "=", 3) + if len(values) == 2 && values[0] == name { + return values[1], nil + } + } + return "", scanner.Err() + } else if !os.IsNotExist(err) { + return "", err + } + return "", nil +} + func SerialNumberWithContext(ctx context.Context, name string) (string, error) { var stat unix.Stat_t - err := unix.Stat(name, &stat) - if err != nil { + if err := unix.Stat(name, &stat); err != nil { return "", err } major := unix.Major(uint64(stat.Rdev)) minor := unix.Minor(uint64(stat.Rdev)) - // Try to get the serial from udev data - udevDataPath := common.HostRunWithContext(ctx, fmt.Sprintf("udev/data/b%d:%d", major, minor)) - if udevdata, err := ioutil.ReadFile(udevDataPath); err == nil { - scanner := bufio.NewScanner(bytes.NewReader(udevdata)) - for scanner.Scan() { - values := strings.Split(scanner.Text(), "=") - if len(values) == 2 && values[0] == "E:ID_SERIAL" { - return values[1], nil - } - } + sserial, _ := udevData(ctx, major, minor, "E:ID_SERIAL") + if sserial != "" { + return sserial, nil } // Try to get the serial from sysfs, look at the disk device (minor 0) directly // because if it is a partition it is not going to contain any device information devicePath := common.HostSysWithContext(ctx, fmt.Sprintf("dev/block/%d:0/device", major)) - model, _ := ioutil.ReadFile(filepath.Join(devicePath, "model")) - serial, _ := ioutil.ReadFile(filepath.Join(devicePath, "serial")) + model, _ := os.ReadFile(filepath.Join(devicePath, "model")) + serial, _ := os.ReadFile(filepath.Join(devicePath, "serial")) if len(model) > 0 && len(serial) > 0 { return fmt.Sprintf("%s_%s", string(model), string(serial)), nil } @@ -517,16 +529,26 @@ func SerialNumberWithContext(ctx context.Context, name string) (string, error) { func LabelWithContext(ctx context.Context, name string) (string, error) { // Try label based on devicemapper name dmname_filename := common.HostSysWithContext(ctx, fmt.Sprintf("block/%s/dm/name", name)) - - if !common.PathExists(dmname_filename) { - return "", nil + // Could errors.Join errs with Go >= 1.20 + if common.PathExists(dmname_filename) { + dmname, err := os.ReadFile(dmname_filename) + if err == nil { + return strings.TrimSpace(string(dmname)), nil + } } + // Try udev data + var stat unix.Stat_t + if err := unix.Stat(common.HostDevWithContext(ctx, name), &stat); err != nil { + return "", err + } + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) - dmname, err := ioutil.ReadFile(dmname_filename) + label, err := udevData(ctx, major, minor, "E:ID_FS_LABEL") if err != nil { return "", err } - return strings.TrimSpace(string(dmname)), nil + return label, nil } func getFsType(stat unix.Statfs_t) string { diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd.go new file mode 100644 index 00000000000..5976efadb37 --- /dev/null +++ 
b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd.go @@ -0,0 +1,152 @@ +//go:build netbsd +// +build netbsd + +package disk + +import ( + "context" + "unsafe" + + "github.com/shirou/gopsutil/v3/internal/common" + "golang.org/x/sys/unix" +) + +const ( + // see sys/fstypes.h and `man 5 statvfs` + MNT_RDONLY = 0x00000001 /* read only filesystem */ + MNT_SYNCHRONOUS = 0x00000002 /* file system written synchronously */ + MNT_NOEXEC = 0x00000004 /* can't exec from filesystem */ + MNT_NOSUID = 0x00000008 /* don't honor setuid bits on fs */ + MNT_NODEV = 0x00000010 /* don't interpret special files */ + MNT_ASYNC = 0x00000040 /* file system written asynchronously */ + MNT_NOATIME = 0x04000000 /* Never update access times in fs */ + MNT_SOFTDEP = 0x80000000 /* Use soft dependencies */ +) + +func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, error) { + var ret []PartitionStat + + flag := uint64(1) // ST_WAIT/MNT_WAIT, see sys/fstypes.h + + // get required buffer size + emptyBufSize := 0 + r, _, err := unix.Syscall( + 483, // SYS___getvfsstat90 syscall + uintptr(unsafe.Pointer(nil)), + uintptr(unsafe.Pointer(&emptyBufSize)), + uintptr(unsafe.Pointer(&flag)), + ) + if err != 0 { + return ret, err + } + mountedFsCount := uint64(r) + + // calculate the buffer size + bufSize := sizeOfStatvfs * mountedFsCount + buf := make([]Statvfs, mountedFsCount) + + // request agian to get desired mount data + _, _, err = unix.Syscall( + 483, // SYS___getvfsstat90 syscall + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&bufSize)), + uintptr(unsafe.Pointer(&flag)), + ) + if err != 0 { + return ret, err + } + + for _, stat := range buf { + opts := []string{"rw"} + if stat.Flag&MNT_RDONLY != 0 { + opts = []string{"rw"} + } + if stat.Flag&MNT_SYNCHRONOUS != 0 { + opts = append(opts, "sync") + } + if stat.Flag&MNT_NOEXEC != 0 { + opts = append(opts, "noexec") + } + if stat.Flag&MNT_NOSUID != 0 { + opts = append(opts, "nosuid") + } + if stat.Flag&MNT_NODEV != 0 { + opts = append(opts, "nodev") + } + if stat.Flag&MNT_ASYNC != 0 { + opts = append(opts, "async") + } + if stat.Flag&MNT_SOFTDEP != 0 { + opts = append(opts, "softdep") + } + if stat.Flag&MNT_NOATIME != 0 { + opts = append(opts, "noatime") + } + + d := PartitionStat{ + Device: common.ByteToString([]byte(stat.Mntfromname[:])), + Mountpoint: common.ByteToString([]byte(stat.Mntonname[:])), + Fstype: common.ByteToString([]byte(stat.Fstypename[:])), + Opts: opts, + } + + ret = append(ret, d) + } + + return ret, nil +} + +func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { + ret := make(map[string]IOCountersStat) + return ret, common.ErrNotImplementedError +} + +func UsageWithContext(ctx context.Context, path string) (*UsageStat, error) { + stat := Statvfs{} + flag := uint64(1) // ST_WAIT/MNT_WAIT, see sys/fstypes.h + + _path, e := unix.BytePtrFromString(path) + if e != nil { + return nil, e + } + + _, _, err := unix.Syscall( + 484, // SYS___statvfs190, see sys/syscall.h + uintptr(unsafe.Pointer(_path)), + uintptr(unsafe.Pointer(&stat)), + uintptr(unsafe.Pointer(&flag)), + ) + if err != 0 { + return nil, err + } + + // frsize is the real block size on NetBSD. 
See discuss here: https://bugzilla.samba.org/show_bug.cgi?id=11810 + bsize := stat.Frsize + ret := &UsageStat{ + Path: path, + Fstype: getFsType(stat), + Total: (uint64(stat.Blocks) * uint64(bsize)), + Free: (uint64(stat.Bavail) * uint64(bsize)), + InodesTotal: (uint64(stat.Files)), + InodesFree: (uint64(stat.Ffree)), + } + + ret.InodesUsed = (ret.InodesTotal - ret.InodesFree) + ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0 + ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize) + ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0 + + return ret, nil +} + +func getFsType(stat Statvfs) string { + return common.ByteToString(stat.Fstypename[:]) +} + +func SerialNumberWithContext(ctx context.Context, name string) (string, error) { + return "", common.ErrNotImplementedError +} + +func LabelWithContext(ctx context.Context, name string) (string, error) { + return "", common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_amd64.go new file mode 100644 index 00000000000..c21421cfeb6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_amd64.go @@ -0,0 +1,45 @@ +//go:build netbsd && amd64 +// +build netbsd,amd64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_netbsd.go + +package disk + +const ( + sizeOfStatvfs = 0xce0 +) + +type ( + Statvfs struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx _Ctype_struct___0 + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint64 + Fstypename [32]uint8 + Mntonname [1024]uint8 + Mntfromname [1024]uint8 + Mntfromlabel [1024]uint8 + } +) + +type _Ctype_struct___0 struct { + FsidVal [2]int32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_arm64.go new file mode 100644 index 00000000000..dfe48f8126e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_netbsd_arm64.go @@ -0,0 +1,45 @@ +//go:build netbsd && arm64 +// +build netbsd,arm64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_netbsd.go + +package disk + +const ( + sizeOfStatvfs = 0xce0 +) + +type ( + Statvfs struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx _Ctype_struct___0 + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint64 + Fstypename [32]uint8 + Mntonname [1024]uint8 + Mntfromname [1024]uint8 + Mntfromlabel [1024]uint8 + } +) + +type _Ctype_struct___0 struct { + FsidVal [2]int32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_riscv64.go new file mode 100644 index 00000000000..8374b94eba7 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_openbsd_riscv64.go @@ -0,0 +1,40 @@ +//go:build openbsd && riscv64 +// +build openbsd,riscv64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs disk/types_openbsd.go + +package disk + +const ( + devstat_NO_DATA = 0x00 + devstat_READ = 0x01 + devstat_WRITE = 0x02 + devstat_FREE = 0x03 +) + +const ( + sizeOfDiskstats = 0x70 +) + +type ( + Diskstats struct { + Name [16]int8 + Busy int32 + Rxfer uint64 + Wxfer uint64 + Seek uint64 + Rbytes uint64 + Wbytes uint64 + Attachtime Timeval + Timestamp Timeval + Time Timeval + } + Timeval struct { + Sec int64 + Usec int64 + } +) + +type Diskstat struct{} +type bintime struct{} diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go index 934d651ffbb..5d6ea8653ac 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_solaris.go @@ -83,6 +83,8 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro return ret, err } +var kstatSplit = regexp.MustCompile(`[:\s]+`) + func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOCountersStat, error) { var issolaris bool if runtime.GOOS == "illumos" { @@ -107,7 +109,6 @@ func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOC writesarr := make(map[string]uint64) rtimearr := make(map[string]uint64) wtimearr := make(map[string]uint64) - re := regexp.MustCompile(`[:\s]+`) // in case the name is "/dev/sda1", then convert to "sda1" for i, name := range names { @@ -115,7 +116,7 @@ func IOCountersWithContext(ctx context.Context, names ...string) (map[string]IOC } for _, line := range lines { - fields := re.Split(line, -1) + fields := kstatSplit.Split(line, -1) if len(fields) == 0 { continue } diff --git a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go index 8a1a28d69a3..e17db3e5be3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go +++ b/vendor/github.com/shirou/gopsutil/v3/disk/disk_windows.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "fmt" - "sync" "syscall" "unsafe" @@ -51,6 +50,7 @@ func init() { key, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Services\PartMgr`, registry.SET_VALUE) if err == nil { key.SetDWordValue("EnableCounterForIoctl", 1) + key.Close() } } @@ -86,28 +86,22 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro warnings := Warnings{ Verbose: true, } - var ret []PartitionStat - retChan := make(chan []PartitionStat) - errChan := make(chan error) - lpBuffer := make([]byte, 254) - - var waitgrp sync.WaitGroup - waitgrp.Add(1) - defer waitgrp.Done() - - f := func() { - defer func() { - waitgrp.Wait() - // fires when this func and the outside func finishes. 
- close(errChan) - close(retChan) - }() + + var errLogicalDrives error + retChan := make(chan PartitionStat) + quitChan := make(chan struct{}) + defer close(quitChan) + + getPartitions := func() { + defer close(retChan) + + lpBuffer := make([]byte, 254) diskret, _, err := procGetLogicalDriveStringsW.Call( uintptr(len(lpBuffer)), uintptr(unsafe.Pointer(&lpBuffer[0]))) if diskret == 0 { - errChan <- err + errLogicalDrives = err return } for _, v := range lpBuffer { @@ -153,27 +147,37 @@ func PartitionsWithContext(ctx context.Context, all bool) ([]PartitionStat, erro opts = append(opts, "compress") } - d := PartitionStat{ + select { + case retChan <- PartitionStat{ Mountpoint: path, Device: path, - Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)), + Fstype: string(bytes.ReplaceAll(lpFileSystemNameBuffer, []byte("\x00"), []byte(""))), Opts: opts, + }: + case <-quitChan: + return } - ret = append(ret, d) } } } - retChan <- ret } - go f() - select { - case err := <-errChan: - return ret, err - case ret := <-retChan: - return ret, warnings.Reference() - case <-ctx.Done(): - return ret, ctx.Err() + go getPartitions() + + var ret []PartitionStat + for { + select { + case p, ok := <-retChan: + if !ok { + if errLogicalDrives != nil { + return ret, errLogicalDrives + } + return ret, warnings.Reference() + } + ret = append(ret, p) + case <-ctx.Done(): + return ret, ctx.Err() + } } } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go index 9bfece3628d..5e25e507b48 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go @@ -14,7 +14,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -87,7 +86,7 @@ func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { fpath += "_" + i.Suffix } if PathExists(fpath) { - return ioutil.ReadFile(fpath) + return os.ReadFile(fpath) } return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) } @@ -100,7 +99,7 @@ var ErrNotImplementedError = errors.New("not implemented yet") // ReadFile reads contents from a file func ReadFile(filename string) (string, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return "", err } @@ -114,6 +113,30 @@ func ReadLines(filename string) ([]string, error) { return ReadLinesOffsetN(filename, 0, -1) } +// ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. +func ReadLine(filename string, prefix string) (string, error) { + f, err := os.Open(filename) + if err != nil { + return "", err + } + defer f.Close() + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return "", err + } + if strings.HasPrefix(line, prefix) { + return line, nil + } + } + + return "", nil +} + // ReadLinesOffsetN reads contents from file and splits them by new line. // The offset tells at which line number to start. // The count determines the number of lines to read (starting from offset): @@ -320,7 +343,7 @@ func PathExistsWithContents(filename string) bool { if err != nil { return false } - return info.Size() > 4 // at least 4 bytes + return info.Size() > 4 && !info.IsDir() // at least 4 bytes } // GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. 
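The hunk above adds a streaming `ReadLine(filename, prefix)` helper to gopsutil's internal `common` package; the Linux boot-time changes later in this diff use it to pull only the `btime` line out of /proc/stat instead of reading every line into memory. A minimal standalone sketch of the same idea follows; `readPrefixedLine` and the /proc/stat parsing are illustrative stand-ins, not the vendored internal API.

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readPrefixedLine mirrors the new ReadLine helper: stream the file and
// return the first line starting with prefix, without reading the rest.
func readPrefixedLine(filename, prefix string) (string, error) {
	f, err := os.Open(filename)
	if err != nil {
		return "", err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		if strings.HasPrefix(s.Text(), prefix) {
			return s.Text(), nil
		}
	}
	return "", s.Err()
}

func main() {
	// Same parse the patched readBootTimeStat performs on the btime line.
	line, err := readPrefixedLine("/proc/stat", "btime")
	if err != nil || line == "" {
		fmt.Println("btime not found:", err)
		return
	}
	fields := strings.Fields(line)
	if len(fields) != 2 {
		fmt.Println("wrong btime format")
		return
	}
	bootTime, _ := strconv.ParseInt(fields[1], 10, 64)
	fmt.Println("boot time (unix seconds):", bootTime)
}
```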
diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go index b58edbeb0a1..a429e16a2cc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go @@ -12,10 +12,14 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" ) +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + func DoSysctrl(mib string) ([]string, error) { cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) @@ -56,23 +60,62 @@ func NumProcsWithContext(ctx context.Context) (uint64, error) { return cnt, nil } -func BootTimeWithContext(ctx context.Context) (uint64, error) { +func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) { + if enableCache { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + } + system, role, err := VirtualizationWithContext(ctx) if err != nil { return 0, err } - statFile := "stat" + useStatFile := true if system == "lxc" && role == "guest" { // if lxc, /proc/uptime is used. - statFile = "uptime" + useStatFile = false } else if system == "docker" && role == "guest" { // also docker, guest - statFile = "uptime" + useStatFile = false + } + + if useStatFile { + t, err := readBootTimeStat(ctx) + if err != nil { + return 0, err + } + if enableCache { + atomic.StoreUint64(&cachedBootTime, t) + } } - filename := HostProcWithContext(ctx, statFile) + filename := HostProcWithContext(ctx, "uptime") lines, err := ReadLines(filename) + if err != nil { + return handleBootTimeFileReadErr(err) + } + if len(lines) != 1 { + return 0, fmt.Errorf("wrong uptime format") + } + f := strings.Fields(lines[0]) + b, err := strconv.ParseFloat(f[0], 64) + if err != nil { + return 0, err + } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + t := currentTime - b + + if enableCache { + atomic.StoreUint64(&cachedBootTime, uint64(t)) + } + + return uint64(t), nil +} + +func handleBootTimeFileReadErr(err error) (uint64, error) { if os.IsPermission(err) { var info syscall.Sysinfo_t err := syscall.Sysinfo(&info) @@ -84,39 +127,27 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { t := currentTime - int64(info.Uptime) return uint64(t), nil } + return 0, err +} + +func readBootTimeStat(ctx context.Context) (uint64, error) { + filename := HostProcWithContext(ctx, "stat") + line, err := ReadLine(filename, "btime") if err != nil { - return 0, err + return handleBootTimeFileReadErr(err) } - - if statFile == "stat" { - for _, line := range lines { - if strings.HasPrefix(line, "btime") { - f := strings.Fields(line) - if len(f) != 2 { - return 0, fmt.Errorf("wrong btime format") - } - b, err := strconv.ParseInt(f[1], 10, 64) - if err != nil { - return 0, err - } - t := uint64(b) - return t, nil - } + if strings.HasPrefix(line, "btime") { + f := strings.Fields(line) + if len(f) != 2 { + return 0, fmt.Errorf("wrong btime format") } - } else if statFile == "uptime" { - if len(lines) != 1 { - return 0, fmt.Errorf("wrong uptime format") - } - f := strings.Fields(lines[0]) - b, err := strconv.ParseFloat(f[0], 64) + b, err := strconv.ParseInt(f[1], 10, 64) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) - t := currentTime - b - return uint64(t), nil + t := uint64(b) + return t, nil } - return 0, fmt.Errorf("could not find btime") } @@ -298,7 
+329,7 @@ func GetOSReleaseWithContext(ctx context.Context) (platform string, version stri switch field[0] { case "ID": // use ID for lowercase platform = trimQuotes(field[1]) - case "VERSION": + case "VERSION_ID": version = trimQuotes(field[1]) } } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go new file mode 100644 index 00000000000..efbc710a5ef --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go @@ -0,0 +1,66 @@ +//go:build netbsd +// +build netbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/yusufpapurcu/wmi/wmi.go b/vendor/github.com/yusufpapurcu/wmi/wmi.go index 26c3581c975..03f386ed59c 100644 --- a/vendor/github.com/yusufpapurcu/wmi/wmi.go +++ b/vendor/github.com/yusufpapurcu/wmi/wmi.go @@ -456,6 +456,18 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat Reason: "not a Float32", } } + case float64: + switch f.Kind() { + case reflect.Float32, reflect.Float64: + f.SetFloat(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float64", + } + } + default: if f.Kind() == reflect.Slice { switch f.Type().Elem().Kind() { diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 5954801122a..856c75dd4e2 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.32.0 +// protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -430,7 +430,7 @@ type ClientHeader struct { MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` // A single process may be used to run multiple virtual // servers with different identities. - // The authority is the name of such a server identitiy. + // The authority is the name of such a server identity. // It is typically a portion of the URI in the form of // or : . 
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index e6f2625b684..f0b7f3200f1 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1772,6 +1772,8 @@ func parseTarget(target string) (resolver.Target, error) { return resolver.Target{URL: *u}, nil } +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. func encodeAuthority(authority string) string { const upperhex = "0123456789ABCDEF" @@ -1860,27 +1862,15 @@ func (cc *ClientConn) determineAuthority() error { } endpoint := cc.parsedTarget.Endpoint() - target := cc.target - switch { - case authorityFromDialOption != "": + if authorityFromDialOption != "" { cc.authority = authorityFromDialOption - case authorityFromCreds != "": + } else if authorityFromCreds != "" { cc.authority = authorityFromCreds - case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): - // TODO: remove when the unix resolver implements optional interface to - // return channel authority. - cc.authority = "localhost" - case strings.HasPrefix(endpoint, ":"): + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { cc.authority = "localhost" + endpoint - default: - // TODO: Define an optional interface on the resolver builder to return - // the channel authority given the user's dial target. For resolvers - // which don't implement this interface, we will use the endpoint from - // "scheme://authority/endpoint" as the default authority. - // Escape the endpoint to handle use cases where the endpoint - // might not be a valid authority by default. - // For example an endpoint which has multiple paths like - // 'a/b/c', which is not a valid authority by default. + } else { cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 0ee3d3bae97..66d5cdf03ec 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -23,8 +23,9 @@ package proto import ( "fmt" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/encoding" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) // Name is the name registered for the proto compressor. 
@@ -38,21 +39,34 @@ func init() { type codec struct{} func (codec) Marshal(v any) ([]byte, error) { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) } + return proto.Marshal(vv) } func (codec) Unmarshal(data []byte, v any) error { - vv, ok := v.(proto.Message) - if !ok { + vv := messageV2Of(v) + if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } + return proto.Unmarshal(data, vv) } +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v + } + + return nil +} + func (codec) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 24299efd63f..5bf880d4190 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.31.0 -// protoc v4.22.0 +// protoc-gen-go v1.32.0 +// protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 4439cda0f3c..4c46c098dc6 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v4.22.0 +// - protoc v4.25.2 // source: grpc/health/v1/health.proto package grpc_health_v1 diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0f31274a3cc..e8456a77c25 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -25,11 +25,12 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) type callIDGenerator struct { @@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // in TruncatingMethodLogger as possible. 
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() - timestamp, _ := ptypes.TimestampProto(time.Now()) + timestamp := timestamppb.Now() m.Timestamp = timestamp m.CallId = ml.callID m.SequenceIdWithinCall = ml.idWithinCallGen.next() @@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { Authority: c.Authority, } if c.Timeout > 0 { - clientHeader.Timeout = ptypes.DurationProto(c.Timeout) + clientHeader.Timeout = durationpb.New(c.Timeout) } ret := &binlogpb.GrpcLogEntry{ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 264de387c2a..9ea598b14c0 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -25,8 +25,8 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/protobuf/proto" ) var ( diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index aa97273e7d1..0126d6b5108 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -1,3 +1,8 @@ +//go:build !go1.21 + +// TODO: when this file is deleted (after Go 1.20 support is dropped), delete +// all of grpcrand and call the rand package directly. + /* * * Copyright 2018 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go new file mode 100644 index 00000000000..c37299af1ef --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go @@ -0,0 +1,73 @@ +//go:build go1.21 + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import "math/rand" + +// This implementation will be used for Go version 1.21 or newer. +// For older versions, the original implementation with mutex will be used. + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + return rand.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + return rand.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + return rand.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + return rand.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + return rand.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. 
+func Uint64() uint64 { + return rand.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + return rand.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + return rand.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + rand.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 2549fe8e3b8..6c7ea6a5336 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -57,7 +57,7 @@ var ( // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,6 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports any // func(*grpc.Server, string) // IsRegisteredMethod returns whether the passed in method is registered as // a method on the server. IsRegisteredMethod any // func(*grpc.Server, string) bool @@ -188,6 +183,19 @@ var ( ExitIdleModeForTesting any // func(*grpc.ClientConn) error ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
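The internal.go hunk above extends gRPC's untyped-hook pattern: the internal package declares variables of type `any` (here `FromOutgoingContextRaw`), the owning package fills them in from init(), and consumers type-assert them back to a concrete signature (the metadata.go and http2_client.go hunks further down in this diff do exactly that). The sketch below collapses those three packages into one runnable file purely to show the shape of the mechanism; all names are illustrative.

```go
package main

import (
	"context"
	"fmt"
)

// Hook declared as `any` so the declaring package needs no import of the
// package that implements it (avoiding an import cycle).
var FromOutgoingContextRaw any

type MD map[string][]string

// Stand-in for metadata's unexported implementation.
func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
	return MD{"k": {"v"}}, nil, true
}

func init() {
	// The owning package publishes its implementation at init time.
	FromOutgoingContextRaw = fromOutgoingContextRaw
}

func main() {
	// The consumer asserts the hook back to the expected signature, as
	// http2_client.go does in this diff. There it is a package-level var,
	// which is safe because imported packages are initialised first.
	fromRaw := FromOutgoingContextRaw.(func(context.Context) (MD, [][]string, bool))
	md, _, ok := fromRaw(context.Background())
	fmt.Println(md, ok)
}
```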
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 7033191375d..52cfab1b93d 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,7 +24,6 @@ import ( "encoding/json" "fmt" - "github.com/golang/protobuf/jsonpb" protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" protov2 "google.golang.org/protobuf/proto" @@ -38,15 +37,15 @@ const jsonIndent = " " func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: - mm := jsonpb.Marshaler{Indent: jsonIndent} - ret, err := mm.MarshalToString(ee) + mm := protojson.MarshalOptions{Indent: jsonIndent} + ret, err := mm.Marshal(protov1.MessageV2(ee)) if err != nil { // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 // messages are not imported, and this will fail because the message // is not found. return fmt.Sprintf("%+v", ee) } - return ret + return string(ret) case protov2.Message: mm := protojson.MarshalOptions{ Multiline: true, diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 16091168773..27cd81af9e5 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 03ef2fedd5c..c7dbc820595 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -31,10 +31,11 @@ import ( "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" ) // Status represents an RPC status code, message, and details. It is immutable @@ -130,14 +131,14 @@ func (s *Status) Err() error { // WithDetails returns a new status with the provided details messages appended to the status. // If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { +func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } // s.Code() != OK implies that s.Proto() != nil. 
p := s.Proto() for _, detail := range details { - any, err := ptypes.MarshalAny(detail) + any, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } @@ -154,12 +155,12 @@ func (s *Status) Details() []any { } details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { + detail, err := any.UnmarshalNew() + if err != nil { details = append(details, err) continue } - details = append(details, detail.Message) + details = append(details, detail) } return details } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go similarity index 96% rename from vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go index aeffd3e1c7b..4f347edd423 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -1,4 +1,4 @@ -//go:build !unix +//go:build !unix && !windows /* * Copyright 2023 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 00000000000..fd7d43a8907 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. 
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b330ccedc8a..83c3829826a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -535,8 +535,8 @@ const minBatchSize = 1000 // size is too low to give stream goroutines a chance to fill it up. // // Upon exiting, if the error causing the exit is not an I/O error, run() -// flushes and closes the underlying connection. Otherwise, the connection is -// left open to allow the I/O error to be encountered by the reader instead. +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. func (l *loopyWriter) run() (err error) { defer func() { if l.logger.V(logLevel) { @@ -544,7 +544,6 @@ func (l *loopyWriter) run() (err error) { } if !isIOError(err) { l.framer.writer.Flush() - l.conn.Close() } l.cbuf.finish() }() diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index a9d70e2a16c..bd39ff9a229 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -35,7 +35,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -45,6 +44,7 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // NewServerHandlerTransport returns a ServerTransport handling gRPC from diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 59f67655a85..eff8799640c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -449,7 +451,13 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.run() + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. 
+ t.conn.Close() + } close(t.writerDone) }() return t, nil @@ -568,7 +576,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -1323,10 +1331,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 680c9eba0b1..3839c1ade27 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -32,13 +32,13 @@ import ( "sync/atomic" "time" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -322,8 +322,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - t.loopy.run() + err := t.loopy.run() close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + select { + case <-t.readerDone: + case <-time.After(time.Second): + } + t.conn.Close() + } }() go t.keepalive() return t, nil @@ -609,8 +625,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // traceCtx attaches trace to ctx and returns the new context. 
func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { defer func() { - <-t.loopyWriterDone close(t.readerDone) + <-t.loopyWriterDone }() for { t.controlBuf.throttle() @@ -636,10 +652,6 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } continue } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close(err) - return - } t.Close(err) return } @@ -960,7 +972,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -1324,6 +1341,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } + t.framer.writer.Flush() if retErr != nil { return false, retErr } @@ -1344,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, err } go func() { - timer := time.NewTimer(time.Minute) + timer := time.NewTimer(5 * time.Second) defer timer.Stop() select { case <-t.drainEvent.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index b7b8fec1804..d3796c256e2 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -28,6 +28,7 @@ import ( "fmt" "io" "net" + "strings" "sync" "sync/atomic" "time" @@ -362,8 +363,12 @@ func (s *Stream) SendCompress() string { // ClientAdvertisedCompressors returns the compressor names advertised by the // client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() string { - return s.clientAdvertisedCompressors +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values } // Done returns a channel which is closed when it receives the final status diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 49446825763..1e9485fd6e2 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -238,16 +244,13 @@ func copyOf(v []string) []string { return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead. 
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index bd1c7d01b7e..d72f21c1b32 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -168,6 +168,9 @@ type BuildOptions struct { // field. In most cases though, it is not appropriate, and this field may // be ignored. Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string } // An Endpoint is one network endpoint, or server, which may have multiple @@ -314,3 +317,13 @@ type Resolver interface { // Close closes the resolver. Close() } + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c79bab12149..f845ac95893 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -75,6 +75,7 @@ func (ccr *ccResolverWrapper) start() error { DialCreds: ccr.cc.dopts.copts.TransportCredentials, CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, } var err error ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index b7723aa09cb..82493d237bc 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -189,6 +189,20 @@ type EmptyCallOption struct{} func (EmptyCallOption) before(*callInfo) error { return nil } func (EmptyCallOption) after(*callInfo, *csAttempt) {} +// StaticMethod returns a CallOption which specifies that a call is being made +// to a method that is static, which means the method is known at compile time +// and doesn't change at runtime. This can be used as a signal to stats plugins +// that this method is safe to include as a key to a measurement. +func StaticMethod() CallOption { + return StaticMethodCallOption{} +} + +// StaticMethodCallOption is a CallOption that specifies that a call comes +// from a static method. +type StaticMethodCallOption struct { + EmptyCallOption +} + // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. func Header(md *metadata.MD) CallOption { @@ -640,14 +654,18 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. 
// // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -726,17 +744,19 @@ type payloadInfo struct { uncompressedBytes []byte } -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, buf, err := p.recvMsg(maxReceiveMessageSize) +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, +) (uncompressedBuf []byte, cancel func(), err error) { + pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, err - } - if payInfo != nil { - payInfo.compressedLength = len(buf) + return nil, nil, err } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, st.Err() + return nil, nil, st.Err() } var size int @@ -744,21 +764,35 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - buf, err = dc.Do(bytes.NewReader(buf)) - size = len(buf) + uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + size = len(uncompressedBuf) } else { - buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } + } else { + uncompressedBuf = compressedBuf } - return buf, nil + + if payInfo != nil { + payInfo.compressedLength = len(compressedBuf) + payInfo.uncompressedBytes = uncompressedBuf + + cancel = func() {} + } else { + cancel = func() { + p.recvBufferPool.Put(&compressedBuf) + } + } + + return uncompressedBuf, cancel, nil } // Using compressor, decompress d, returning data and size. @@ -778,6 +812,9 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // size is used as an estimate to size the buffer, but we // will read more data if available. // +MinRead so ReadFrom will not reallocate if size is correct. + // + // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // we can also utilize the recv buffer pool here. 
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return buf.Bytes(), int(bytesRead), err @@ -793,18 +830,15 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } + defer cancel() + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } - if payInfo != nil { - payInfo.uncompressedBytes = buf - } else { - p.recvBufferPool.Put(&buf) - } return nil } @@ -954,6 +988,7 @@ const ( SupportPackageIsVersion5 = true SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 682fa1831ec..a6a11704b34 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -33,8 +33,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" @@ -74,9 +72,6 @@ func init() { return srv.isRegisteredMethod(method) } internal.ServerFromContext = serverFromContext - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) - } internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) } @@ -134,12 +129,13 @@ type Server struct { drain bool cv *sync.Cond // signaled when connections close for GracefulStop services map[string]*serviceInfo // service name -> service info - events trace.EventLog + events traceEventLog quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines channelzID *channelz.Identifier czData *channelzData @@ -176,6 +172,7 @@ type serverOptions struct { headerTableSize *uint32 numServerWorkers uint32 recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ @@ -573,6 +570,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + // RecvBufferPool returns a ServerOption that configures the server // to use the provided shared buffer pool for parsing incoming messages. 
Depending // on the application's workload, this could result in reduced memory allocation. @@ -656,7 +668,7 @@ func NewServer(opt ...ServerOption) *Server { s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } if s.opts.numServerWorkers > 0 { @@ -932,6 +944,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } @@ -941,15 +959,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -1010,9 +1019,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() + defer s.handlersWG.Done() s.handleStream(st, stream) } @@ -1331,7 +1342,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + + d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1342,6 +1354,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor t.IncrMsgRecv() } df := func(v any) error { + defer cancel() + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1721,8 +1735,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ctx = contextWithServer(ctx, s) var ti *traceInfo if EnableTracing { - tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) - ctx = trace.NewContext(ctx, tr) + tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = newTraceContext(ctx, tr) ti = &traceInfo{ tr: tr, firstLine: firstLine{ @@ -1911,6 +1925,10 @@ func (s *Server) stop(graceful bool) { s.serverWorkerChannelClose() } + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + if s.events != nil { s.events.Finish() s.events = nil @@ -2102,7 +2120,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } - return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil + return stream.ClientAdvertisedCompressors(), nil } // SetTrailer sets the trailer metadata that will be sent when an RPC returns. 
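Illustrative usage sketch, not part of the vendored diff: the WaitForHandlers server option added above changes how Stop behaves. A minimal example assuming the vendored google.golang.org/grpc at v1.62; the service registration line is a placeholder:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// With WaitForHandlers(true), Stop blocks until every in-flight method handler
	// has returned; by default it returns as soon as all connections are closed.
	srv := grpc.NewServer(grpc.WaitForHandlers(true))
	// ... pb.RegisterFooServer(srv, &fooServer{}) would go here (hypothetical service) ...
	go func() {
		if err := srv.Serve(lis); err != nil {
			log.Printf("Serve: %v", err)
		}
	}()
	// ... later, during shutdown:
	srv.Stop()
}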
@@ -2142,7 +2160,7 @@ func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. -func validateSendCompressor(name, clientCompressors string) error { +func validateSendCompressor(name string, clientCompressors []string) error { if name == encoding.Identity { return nil } @@ -2151,7 +2169,7 @@ func validateSendCompressor(name, clientCompressors string) error { return fmt.Errorf("compressor not registered %q", name) } - for _, c := range strings.Split(clientCompressors, ",") { + for _, c := range clientCompressors { if c == name { return nil // found match } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b14b2fbea2e..814e998354a 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -27,7 +27,6 @@ import ( "sync" "time" - "golang.org/x/net/trace" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" @@ -48,6 +47,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -184,7 +185,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -429,7 +430,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) var trInfo *traceInfo if EnableTracing { trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), + tr: newTrace("grpc.Sent."+methodFamily(method), method), firstLine: firstLine{ client: true, }, @@ -438,7 +439,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) trInfo.firstLine.deadline = time.Until(deadline) } trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) + ctx = newTraceContext(ctx, trInfo.tr) } if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 9ded79321ba..10f4f798f5e 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -26,8 +26,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/trace" ) // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. @@ -44,9 +42,31 @@ func methodFamily(m string) string { return m } +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. 
+type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + // traceInfo contains tracing information for an RPC. type traceInfo struct { - tr trace.Trace + tr traceLog firstLine firstLine } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 00000000000..1da3a2308e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go new file mode 100644 index 00000000000..88d6e8571ee --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -0,0 +1,39 @@ +//go:build !grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + + t "golang.org/x/net/trace" +) + +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index dc2cea59c93..46ad8113ff4 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.60.1" +const Version = "1.62.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 896dc38f506..7a33c215b58 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -41,7 +41,7 @@ if [[ "$1" = "-install" ]]; then popd if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -88,7 +88,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -127,7 +127,7 @@ staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" # Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' # Error for duplicate imports not including grpc protos. grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused @@ -152,6 +152,7 @@ grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused XXXXX Protobuf related deprecation errors: "github.com/golang/protobuf .pb.go: +grpc_testing_not_regenerate : ptypes. proto.RegisterType XXXXX gRPC internal usage deprecation errors: @@ -184,9 +185,6 @@ GetSafeRegexMatch GetSuffixMatch GetTlsCertificateCertificateProviderInstance GetValidationContextCertificateProviderInstance -XXXXX TODO: Remove the below deprecation usages: -CloseNotifier -Roots.Subjects XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 00000000000..2ef36bbcf92 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. + Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. 
+ if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. +func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go new file mode 100644 index 00000000000..ea276d15a02 --- /dev/null +++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoadapt bridges the original and new proto APIs. +package protoadapt + +import ( + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type. +type MessageV1 = protoiface.MessageV1 + +// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the +// current [google.golang.org/protobuf] module, adding support for reflection. +type MessageV2 = proto.Message + +// MessageV1Of converts a v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1Of(m MessageV2) MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2Of converts a v1 message to a v2 message. +// It returns nil if m is nil. +func MessageV2Of(m MessageV1) MessageV2 { + return protoimpl.X.ProtoMessageV2Of(m) +} diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go index e4e740cad4c..e0811e8344c 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/utils/integer/integer.go @@ -16,6 +16,8 @@ limitations under the License. package integer +import "math" + // IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { @@ -65,9 +67,7 @@ func Int64Min(a, b int64) int64 { } // RoundToInt32 rounds floats into integer numbers. +// Deprecated: use math.Round() and a cast directly. 
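Illustrative usage sketch, not part of the vendored diff: the protodelim package introduced above writes and reads varint size-delimited wire-format messages. A minimal round trip, assuming the vendored google.golang.org/protobuf module and its wrapperspb well-known types:

package main

import (
	"bufio"
	"bytes"
	"log"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var buf bytes.Buffer
	// MarshalTo prefixes the serialized message with its varint-encoded size.
	if _, err := protodelim.MarshalTo(&buf, wrapperspb.String("hello")); err != nil {
		log.Fatal(err)
	}
	// UnmarshalFrom requires an io.ByteReader, which bufio.Reader provides.
	out := &wrapperspb.StringValue{}
	if err := protodelim.UnmarshalFrom(bufio.NewReader(&buf), out); err != nil {
		log.Fatal(err)
	}
	log.Println(out.GetValue()) // hello
}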
func RoundToInt32(a float64) int32 { - if a < 0 { - return int32(a - 0.5) - } - return int32(a + 0.5) + return int32(math.Round(a)) } diff --git a/vendor/k8s.io/utils/set/set.go b/vendor/k8s.io/utils/set/set.go index 563bb43c28e..fc25b2feaa4 100644 --- a/vendor/k8s.io/utils/set/set.go +++ b/vendor/k8s.io/utils/set/set.go @@ -213,17 +213,3 @@ func (s Set[T]) Clone() Set[T] { func (s Set[T]) SymmetricDifference(s2 Set[T]) Set[T] { return s.Difference(s2).Union(s2.Difference(s)) } - -// Clear empties the set. -// It is preferable to replace the set with a newly constructed set, -// but not all callers can do that (when there are other references to the map). -// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN -// can't be cleared because NaN can't be removed. -// For sets containing items of a type that is reflexive for ==, -// this is optimized to a single call to runtime.mapclear(). -func (s Set[T]) Clear() Set[T] { - for key := range s { - delete(s, key) - } - return s -} diff --git a/vendor/k8s.io/utils/set/set_go_1.20.go b/vendor/k8s.io/utils/set/set_go_1.20.go new file mode 100644 index 00000000000..6ab79682b8e --- /dev/null +++ b/vendor/k8s.io/utils/set/set_go_1.20.go @@ -0,0 +1,33 @@ +//go:build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package set + +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). +// In some cases the set *won't* be fully cleared, e.g. a Set[float32] containing NaN +// can't be cleared because NaN can't be removed. +// For sets containing items of a type that is reflexive for ==, +// this is optimized to a single call to runtime.mapclear(). +func (s Set[T]) Clear() Set[T] { + for key := range s { + delete(s, key) + } + return s +} diff --git a/vendor/k8s.io/utils/set/set_go_1.21.go b/vendor/k8s.io/utils/set/set_go_1.21.go new file mode 100644 index 00000000000..e95a368794b --- /dev/null +++ b/vendor/k8s.io/utils/set/set_go_1.21.go @@ -0,0 +1,27 @@ +//go:build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package set + +// Clear empties the set. +// It is preferable to replace the set with a newly constructed set, +// but not all callers can do that (when there are other references to the map). 
+func (s Set[T]) Clear() Set[T] { + clear(s) + return s +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0f9642cfe26..461ea85566c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -5,7 +5,7 @@ emperror.dev/errors ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/BurntSushi/toml v1.2.1 +# github.com/BurntSushi/toml v1.3.2 ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -24,7 +24,7 @@ github.com/Masterminds/sprig/v3 # github.com/Masterminds/squirrel v1.5.3 ## explicit; go 1.14 github.com/Masterminds/squirrel -# github.com/RoaringBitmap/roaring v1.2.3 +# github.com/RoaringBitmap/roaring v1.9.1 ## explicit; go 1.14 github.com/RoaringBitmap/roaring github.com/RoaringBitmap/roaring/internal @@ -93,8 +93,8 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/bits-and-blooms/bitset v1.7.0 -## explicit; go 1.14 +# github.com/bits-and-blooms/bitset v1.12.0 +## explicit; go 1.16 github.com/bits-and-blooms/bitset # github.com/blang/semver v3.5.1+incompatible ## explicit @@ -302,7 +302,7 @@ github.com/gobwas/glob/util/strings ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang/glog v1.1.2 +# github.com/golang/glog v1.2.0 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -313,15 +313,9 @@ github.com/golang/groupcache/lru # github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.5.3 -## explicit; go 1.9 -github.com/golang/protobuf/jsonpb +# github.com/golang/protobuf v1.5.4 +## explicit; go 1.17 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/empty -github.com/golang/protobuf/ptypes/timestamp # github.com/gomodule/redigo v2.0.0+incompatible ## explicit # github.com/google/btree v1.0.1 @@ -360,17 +354,17 @@ github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/gorilla/handlers v1.5.1 -## explicit; go 1.14 +# github.com/gorilla/handlers v1.5.2 +## explicit; go 1.20 github.com/gorilla/handlers -# github.com/gorilla/mux v1.8.0 -## explicit; go 1.12 +# github.com/gorilla/mux v1.8.1 +## explicit; go 1.20 github.com/gorilla/mux -# github.com/gorilla/websocket v1.5.0 -## explicit; go 1.12 +# github.com/gorilla/websocket v1.5.1 +## explicit; go 1.20 github.com/gorilla/websocket # github.com/gosuri/uitable v0.0.4 ## explicit @@ -488,7 +482,7 @@ github.com/lib/pq/scram # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de ## explicit github.com/liggitt/tabwriter -# github.com/longhorn/backing-image-manager v1.6.0-dev-20231217.0.20240103150452-7f8aea1edd03 +# github.com/longhorn/backing-image-manager v1.7.0-dev.0.20240326182459-c5288d745f4a ## explicit; go 1.21 github.com/longhorn/backing-image-manager/api github.com/longhorn/backing-image-manager/pkg/client @@ -496,7 +490,7 @@ github.com/longhorn/backing-image-manager/pkg/meta github.com/longhorn/backing-image-manager/pkg/rpc github.com/longhorn/backing-image-manager/pkg/types github.com/longhorn/backing-image-manager/pkg/util -# github.com/longhorn/backupstore 
v0.0.0-20240110081942-bd231cfb0c7b +# github.com/longhorn/backupstore v0.0.0-20240219094812-3a87ee02df77 ## explicit; go 1.21 github.com/longhorn/backupstore github.com/longhorn/backupstore/backupbackingimage @@ -509,7 +503,7 @@ github.com/longhorn/backupstore/s3 github.com/longhorn/backupstore/systembackup github.com/longhorn/backupstore/types github.com/longhorn/backupstore/util -# github.com/longhorn/go-common-libs v0.0.0-20240109042507-23627e6416b7 +# github.com/longhorn/go-common-libs v0.0.0-20240319112414-b75404dc7fbc ## explicit; go 1.21 github.com/longhorn/go-common-libs/exec github.com/longhorn/go-common-libs/io @@ -519,20 +513,20 @@ github.com/longhorn/go-common-libs/sync github.com/longhorn/go-common-libs/sys github.com/longhorn/go-common-libs/types github.com/longhorn/go-common-libs/utils -# github.com/longhorn/go-iscsi-helper v0.0.0-20240103085736-72aee873888a +# github.com/longhorn/go-iscsi-helper v0.0.0-20240308033847-bc3aab599425 ## explicit; go 1.21 github.com/longhorn/go-iscsi-helper/types -# github.com/longhorn/go-spdk-helper v0.0.0-20240117135122-26f8acb2a13d +# github.com/longhorn/go-spdk-helper v0.0.0-20240328085119-7ab2393959d9 ## explicit; go 1.21 github.com/longhorn/go-spdk-helper/pkg/types -# github.com/longhorn/longhorn-engine v1.6.0-dev-20240105.0.20240110095344-deb8b18a1558 +# github.com/longhorn/longhorn-engine v1.6.0 ## explicit; go 1.21 github.com/longhorn/longhorn-engine/pkg/meta github.com/longhorn/longhorn-engine/pkg/replica/client github.com/longhorn/longhorn-engine/pkg/types github.com/longhorn/longhorn-engine/pkg/util github.com/longhorn/longhorn-engine/proto/ptypes -# github.com/longhorn/longhorn-instance-manager v1.6.0-rc2.0.20240126090453-5a27dd0e4a81 +# github.com/longhorn/longhorn-instance-manager v1.7.0-dev.0.20240308123529-28ec1b59c683 ## explicit; go 1.21 github.com/longhorn/longhorn-instance-manager/pkg/api github.com/longhorn/longhorn-instance-manager/pkg/client @@ -540,7 +534,7 @@ github.com/longhorn/longhorn-instance-manager/pkg/imrpc github.com/longhorn/longhorn-instance-manager/pkg/meta github.com/longhorn/longhorn-instance-manager/pkg/types github.com/longhorn/longhorn-instance-manager/pkg/util -# github.com/longhorn/longhorn-manager v1.6.0 +# github.com/longhorn/longhorn-manager v1.7.0-dev.0.20240402173422-6df051064711 ## explicit; go 1.21 github.com/longhorn/longhorn-manager/csi/crypto github.com/longhorn/longhorn-manager/datastore @@ -564,7 +558,7 @@ github.com/longhorn/longhorn-manager/meta github.com/longhorn/longhorn-manager/scheduler github.com/longhorn/longhorn-manager/types github.com/longhorn/longhorn-manager/util -# github.com/longhorn/longhorn-share-manager v1.6.0-dev-20231217.0.20231226052309-99d57c1695ea +# github.com/longhorn/longhorn-share-manager v1.6.0 ## explicit; go 1.21 github.com/longhorn/longhorn-share-manager/pkg/client github.com/longhorn/longhorn-share-manager/pkg/crypto @@ -574,7 +568,7 @@ github.com/longhorn/longhorn-share-manager/pkg/server/nfs github.com/longhorn/longhorn-share-manager/pkg/types github.com/longhorn/longhorn-share-manager/pkg/util github.com/longhorn/longhorn-share-manager/pkg/volume -# github.com/longhorn/longhorn-spdk-engine v0.0.0-20240123044045-c5f14845bd83 +# github.com/longhorn/longhorn-spdk-engine v0.0.0-20240308061103-33861f26f36e ## explicit; go 1.21 github.com/longhorn/longhorn-spdk-engine/pkg/types github.com/longhorn/longhorn-spdk-engine/proto/spdkrpc @@ -595,9 +589,6 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.14 ## explicit; go 1.9 
github.com/mattn/go-runewidth -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 ## explicit github.com/mcuadros/go-version @@ -745,7 +736,7 @@ github.com/power-devops/perfstat ## explicit; go 1.17 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1 -# github.com/prometheus/client_golang v1.17.0 +# github.com/prometheus/client_golang v1.18.0 ## explicit; go 1.19 github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -753,15 +744,15 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 -## explicit; go 1.18 +# github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.47.0 +## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.11.1 +# github.com/prometheus/procfs v0.12.0 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -811,7 +802,7 @@ github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1 ## explicit; go 1.14 github.com/rancher/kubernetes-provider-detector github.com/rancher/kubernetes-provider-detector/providers -# github.com/rancher/lasso v0.0.0-20230830164424-d684fdeb6f29 +# github.com/rancher/lasso v0.0.0-20240123150939-7055397d6dfa ## explicit; go 1.20 github.com/rancher/lasso/pkg/cache github.com/rancher/lasso/pkg/client @@ -901,7 +892,7 @@ github.com/rancher/steve/pkg/summarycache github.com/rancher/system-upgrade-controller/pkg/apis/condition github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1 -# github.com/rancher/wrangler v1.1.1 +# github.com/rancher/wrangler v1.1.2 ## explicit; go 1.19 github.com/rancher/wrangler/pkg/apply github.com/rancher/wrangler/pkg/apply/injectors @@ -975,7 +966,7 @@ github.com/rubenv/sql-migrate/sqlparse github.com/russross/blackfriday/v2 # github.com/sergi/go-diff v1.2.0 ## explicit; go 1.12 -# github.com/shirou/gopsutil/v3 v3.23.7 +# github.com/shirou/gopsutil/v3 v3.24.2 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/common github.com/shirou/gopsutil/v3/disk @@ -1014,7 +1005,7 @@ github.com/tidwall/match # github.com/tidwall/pretty v1.2.0 ## explicit; go 1.16 github.com/tidwall/pretty -# github.com/urfave/cli v1.22.13 +# github.com/urfave/cli v1.22.14 ## explicit; go 1.11 github.com/urfave/cli # github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b @@ -1029,7 +1020,7 @@ github.com/xeipuuv/gojsonschema # github.com/xlab/treeprint v1.1.0 ## explicit; go 1.13 github.com/xlab/treeprint -# github.com/yusufpapurcu/wmi v1.2.3 +# github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 @@ -1142,7 +1133,7 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy 
golang.org/x/net/trace -# golang.org/x/oauth2 v0.13.0 +# golang.org/x/oauth2 v0.16.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -1226,17 +1217,17 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 +# google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 +# google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.60.1 +# google.golang.org/grpc v1.62.1 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1293,6 +1284,7 @@ google.golang.org/grpc/status google.golang.org/grpc/tap # google.golang.org/protobuf v1.33.0 ## explicit; go 1.17 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -1317,6 +1309,7 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto +google.golang.org/protobuf/protoadapt google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry @@ -1379,7 +1372,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.28.5 => k8s.io/api v0.26.13 +# k8s.io/api v0.28.6 => k8s.io/api v0.26.13 ## explicit; go 1.19 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1444,7 +1437,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.28.5 => k8s.io/apimachinery v0.27.10 +# k8s.io/apimachinery v0.29.3 => k8s.io/apimachinery v0.27.10 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/apitesting/fuzzer @@ -1963,10 +1956,10 @@ k8s.io/kubelet/pkg/apis/stats/v1alpha1 # k8s.io/kubernetes v1.28.5 => k8s.io/kubernetes v1.26.13 ## explicit; go 1.19 k8s.io/kubernetes/pkg/volume/util/hostutil -# k8s.io/mount-utils v0.28.5 => k8s.io/mount-utils v0.26.13 +# k8s.io/mount-utils v0.29.3 => k8s.io/mount-utils v0.26.13 ## explicit; go 1.19 k8s.io/mount-utils -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b +# k8s.io/utils v0.0.0-20240310230437-4693a0247e57 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock